From b39f4861dd8408990d27ae2bfe069efe05da2829 Mon Sep 17 00:00:00 2001
From: kt
Date: Thu, 18 Apr 2019 11:31:45 -0700
Subject: [PATCH] Upgrade to terraform v0.12-early4 SDK (#56)

---
 go.mod | 37 +-
 go.sum | 416 +-
 .../github.com/agext/levenshtein/.gitignore | 51 +
 .../github.com/agext/levenshtein/.travis.yml | 83 +-
 vendor/github.com/agext/levenshtein/README.md | 2 +-
 vendor/github.com/agext/levenshtein/go.mod | 1 +
 vendor/github.com/agext/levenshtein/test.sh | 10 +
 .../apparentlymart/go-cidr/cidr/cidr.go | 102 +-
 vendor/github.com/armon/go-radix/go.mod | 1 +
 .../aws-sdk-go/aws/awsutil/string_value.go | 25 +-
 .../aws/aws-sdk-go/aws/client/client.go | 10 +-
 .../aws-sdk-go/aws/client/default_retryer.go | 80 +-
 .../aws/aws-sdk-go/aws/client/logger.go | 106 +-
 .../aws/client/metadata/client_info.go | 1 +
 .../github.com/aws/aws-sdk-go/aws/config.go | 86 +-
 .../aws/{context.go => context_1_5.go} | 40 +-
 .../aws/aws-sdk-go/aws/context_1_6.go | 41 -
 .../aws/aws-sdk-go/aws/context_1_7.go | 9 -
 .../aws/aws-sdk-go/aws/context_1_9.go | 11 +
 .../aws-sdk-go/aws/context_background_1_5.go | 56 +
 .../aws-sdk-go/aws/context_background_1_7.go | 20 +
 .../aws/aws-sdk-go/aws/context_sleep.go | 24 +
 .../aws/aws-sdk-go/aws/convert_types.go | 18 +
 .../aws-sdk-go/aws/corehandlers/handlers.go | 32 +-
 .../aws-sdk-go/aws/corehandlers/user_agent.go | 37 +
 .../aws/credentials/chain_provider.go | 4 +-
 .../aws-sdk-go/aws/credentials/credentials.go | 64 +-
 .../ec2rolecreds/ec2_role_provider.go | 6 +-
 .../aws/credentials/endpointcreds/provider.go | 7 +
 .../aws/credentials/env_provider.go | 4 -
 .../aws/credentials/processcreds/provider.go | 425 +
 .../shared_credentials_provider.go | 30 +-
 .../aws/credentials/static_provider.go | 2 -
 .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 46 +
 .../aws/aws-sdk-go/aws/csm/enable.go | 67 +
 .../aws/aws-sdk-go/aws/csm/metric.go | 109 +
 .../aws/aws-sdk-go/aws/csm/metric_chan.go | 54 +
 .../aws-sdk-go/aws/csm/metric_exception.go | 26 +
 .../aws/aws-sdk-go/aws/csm/reporter.go | 260 +
 .../aws/aws-sdk-go/aws/defaults/defaults.go | 66 +-
 .../aws/aws-sdk-go/aws/ec2metadata/api.go | 12 +-
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 25 +
 .../aws/aws-sdk-go/aws/endpoints/decode.go | 59 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 3034 ++-
 .../aws/endpoints/dep_service_ids.go | 141 +
 .../aws/aws-sdk-go/aws/endpoints/doc.go | 4 +-
 .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 24 +-
 .../aws/aws-sdk-go/aws/endpoints/v3model.go | 12 +-
 .../aws/endpoints/v3model_codegen.go | 24 +-
 .../github.com/aws/aws-sdk-go/aws/errors.go | 4 -
 .../github.com/aws/aws-sdk-go/aws/logger.go | 10 +-
 .../aws/aws-sdk-go/aws/request/handlers.go | 21 +
 .../aws-sdk-go/aws/request/offset_reader.go | 4 +-
 .../aws/aws-sdk-go/aws/request/request.go | 386 +-
 .../aws/aws-sdk-go/aws/request/request_1_7.go | 2 +-
 .../aws/aws-sdk-go/aws/request/request_1_8.go | 2 +-
 .../aws/request/request_pagination.go | 40 +-
 .../aws/aws-sdk-go/aws/request/retryer.go | 9 +-
 .../aws/aws-sdk-go/aws/request/validation.go | 54 +-
 .../aws/aws-sdk-go/aws/request/waiter.go | 16 +-
 .../aws/aws-sdk-go/aws/session/doc.go | 6 +-
 .../aws/aws-sdk-go/aws/session/env_config.go | 54 +-
 .../aws/aws-sdk-go/aws/session/session.go | 194 +-
 .../aws-sdk-go/aws/session/shared_config.go | 108 +-
 .../aws/aws-sdk-go/aws/signer/v4/v4.go | 203 +-
 vendor/github.com/aws/aws-sdk-go/aws/types.go | 83 +
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../aws/aws-sdk-go/internal/ini/ast.go | 120 +
 .../aws-sdk-go/internal/ini/comma_token.go | 11 +
 .../aws-sdk-go/internal/ini/comment_token.go | 35 +
 .../aws/aws-sdk-go/internal/ini/doc.go | 29 +
 .../aws-sdk-go/internal/ini/empty_token.go | 4 +
 .../aws/aws-sdk-go/internal/ini/expression.go | 24 +
 .../aws/aws-sdk-go/internal/ini/fuzz.go | 17 +
 .../aws/aws-sdk-go/internal/ini/ini.go | 51 +
 .../aws/aws-sdk-go/internal/ini/ini_lexer.go | 165 +
 .../aws/aws-sdk-go/internal/ini/ini_parser.go | 347 +
 .../aws-sdk-go/internal/ini/literal_tokens.go | 324 +
 .../aws-sdk-go/internal/ini/newline_token.go | 30 +
 .../aws-sdk-go/internal/ini/number_helper.go | 152 +
 .../aws/aws-sdk-go/internal/ini/op_tokens.go | 39 +
 .../aws-sdk-go/internal/ini/parse_error.go | 43 +
 .../aws-sdk-go/internal/ini/parse_stack.go | 60 +
 .../aws/aws-sdk-go/internal/ini/sep_tokens.go | 41 +
 .../aws/aws-sdk-go/internal/ini/skipper.go | 45 +
 .../aws/aws-sdk-go/internal/ini/statement.go | 35 +
 .../aws/aws-sdk-go/internal/ini/value_util.go | 284 +
 .../aws/aws-sdk-go/internal/ini/visitor.go | 166 +
 .../aws/aws-sdk-go/internal/ini/walker.go | 25 +
 .../aws/aws-sdk-go/internal/ini/ws_token.go | 24 +
 .../aws/aws-sdk-go/internal/s3err/error.go | 57 +
 .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 +
 .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 +
 .../internal/sdkrand/locked_source.go | 29 +
 .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 +
 .../internal/shareddefaults/ecs_container.go | 12 +
 .../private/protocol/eventstream/debug.go | 144 +
 .../private/protocol/eventstream/decode.go | 199 +
 .../private/protocol/eventstream/encode.go | 114 +
 .../private/protocol/eventstream/error.go | 23 +
 .../eventstream/eventstreamapi/api.go | 196 +
 .../eventstream/eventstreamapi/error.go | 24 +
 .../private/protocol/eventstream/header.go | 166 +
 .../protocol/eventstream/header_value.go | 501 +
 .../private/protocol/eventstream/message.go | 103 +
 .../aws/aws-sdk-go/private/protocol/host.go | 68 +
 .../private/protocol/host_prefix.go | 54 +
 .../aws-sdk-go/private/protocol/jsonvalue.go | 76 +
 .../aws-sdk-go/private/protocol/payload.go | 81 +
 .../private/protocol/query/build.go | 2 +-
 .../protocol/query/queryutil/queryutil.go | 11 +-
 .../private/protocol/query/unmarshal.go | 6 +-
 .../private/protocol/query/unmarshal_error.go | 14 +-
 .../aws-sdk-go/private/protocol/rest/build.go | 33 +-
 .../private/protocol/rest/unmarshal.go | 20 +-
 .../private/protocol/restxml/restxml.go | 12 +-
 .../aws-sdk-go/private/protocol/timestamp.go | 72 +
 .../private/protocol/xml/xmlutil/build.go | 32 +-
 .../private/protocol/xml/xmlutil/unmarshal.go | 20 +-
 .../protocol/xml/xmlutil/xml_to_struct.go | 1 +
 .../aws/aws-sdk-go/service/s3/api.go | 20494 ++++++++++------
 .../aws/aws-sdk-go/service/s3/body_hash.go | 249 +
 .../aws/aws-sdk-go/service/s3/content_md5.go | 36 -
 .../aws-sdk-go/service/s3/customizations.go | 28 +
 .../aws/aws-sdk-go/service/s3/doc.go | 64 +-
 .../aws/aws-sdk-go/service/s3/doc_custom.go | 4 +-
 .../service/s3/host_style_bucket.go | 13 +-
 .../aws/aws-sdk-go/service/s3/service.go | 12 +-
 .../aws/aws-sdk-go/service/s3/sse.go | 18 +-
 .../aws-sdk-go/service/s3/statusok_error.go | 9 +-
 .../aws-sdk-go/service/s3/unmarshal_error.go | 52 +-
 .../aws/aws-sdk-go/service/s3/waiters.go | 8 +-
 .../aws/aws-sdk-go/service/sts/api.go | 317 +-
 .../aws/aws-sdk-go/service/sts/doc.go | 64 +-
 .../aws/aws-sdk-go/service/sts/service.go | 6 +-
 vendor/github.com/davecgh/go-spew/LICENSE | 2 +-
 .../github.com/davecgh/go-spew/spew/bypass.go | 187 +-
 .../davecgh/go-spew/spew/bypasssafe.go | 2 +-
 .../github.com/davecgh/go-spew/spew/common.go | 2 +-
 .../github.com/davecgh/go-spew/spew/dump.go | 10 +-
 .../github.com/davecgh/go-spew/spew/format.go | 4 +-
 vendor/github.com/go-ini/ini/.gitignore | 5 -
 vendor/github.com/go-ini/ini/.travis.yml | 16 -
 vendor/github.com/go-ini/ini/LICENSE | 191 -
 vendor/github.com/go-ini/ini/Makefile | 12 -
 vendor/github.com/go-ini/ini/README.md | 734 -
 vendor/github.com/go-ini/ini/README_ZH.md | 721 -
 vendor/github.com/go-ini/ini/error.go | 32 -
 vendor/github.com/go-ini/ini/ini.go | 535 -
 vendor/github.com/go-ini/ini/key.go | 633 -
 vendor/github.com/go-ini/ini/parser.go | 356 -
 vendor/github.com/go-ini/ini/section.go | 221 -
 vendor/github.com/go-ini/ini/struct.go | 431 -
 .../golang/protobuf/proto/decode.go | 1 -
 .../golang/protobuf/proto/deprecated.go | 63 +
 .../github.com/golang/protobuf/proto/equal.go | 3 +-
 .../golang/protobuf/proto/extensions.go | 78 +-
 .../github.com/golang/protobuf/proto/lib.go | 38 +-
 .../golang/protobuf/proto/message_set.go | 137 +-
 .../golang/protobuf/proto/pointer_reflect.go | 5 +-
 .../golang/protobuf/proto/pointer_unsafe.go | 15 +-
 .../golang/protobuf/proto/properties.go | 31 +-
 .../golang/protobuf/proto/table_marshal.go | 45 +-
 .../golang/protobuf/proto/table_unmarshal.go | 72 +-
 .../golang/protobuf/ptypes/any/any.pb.go | 45 +-
 .../golang/protobuf/ptypes/any/any.proto | 21 +-
 .../golang/protobuf/ptypes/duration.go | 2 +-
 .../protobuf/ptypes/duration/duration.pb.go | 26 +-
 .../protobuf/ptypes/struct/struct.pb.go | 178 +-
 .../golang/protobuf/ptypes/timestamp.go | 6 +-
 .../protobuf/ptypes/timestamp/timestamp.pb.go | 34 +-
 .../protobuf/ptypes/timestamp/timestamp.proto | 8 +-
 .../protobuf/ptypes/wrappers/wrappers.pb.go | 90 +-
 vendor/github.com/google/go-cmp/LICENSE | 27 +
 .../github.com/google/go-cmp/cmp/compare.go | 553 +
 .../go-cmp/cmp/internal/diff/debug_disable.go | 17 +
 .../go-cmp/cmp/internal/diff/debug_enable.go | 122 +
 .../google/go-cmp/cmp/internal/diff/diff.go | 363 +
 .../go-cmp/cmp/internal/function/func.go | 49 +
 .../go-cmp/cmp/internal/value/format.go | 277 +
 .../google/go-cmp/cmp/internal/value/sort.go | 111 +
 .../github.com/google/go-cmp/cmp/options.go | 453 +
 vendor/github.com/google/go-cmp/cmp/path.go | 309 +
 .../github.com/google/go-cmp/cmp/reporter.go | 53 +
 .../google/go-cmp/cmp/unsafe_panic.go | 15 +
 .../google/go-cmp/cmp/unsafe_reflect.go | 23 +
 .../grpc-gateway/runtime/mux.go | 2 +-
 .../grpc-gateway/runtime/pattern.go | 11 +-
 .../hashicorp/go-cleanhttp/cleanhttp.go | 1 +
 .../github.com/hashicorp/go-cleanhttp/go.mod | 1 +
 .../hashicorp/go-cleanhttp/handlers.go | 43 +
 .../hashicorp/go-getter/.travis.yml | 15 +-
 .../github.com/hashicorp/go-getter/README.md | 59 +-
 .../hashicorp/go-getter/appveyor.yml | 2 +-
 .../hashicorp/go-getter/checksum.go | 291 +
 .../github.com/hashicorp/go-getter/client.go | 148 +-
 .../hashicorp/go-getter/client_option.go | 46 +
 .../go-getter/client_option_progress.go | 38 +
 .../github.com/hashicorp/go-getter/common.go | 14 +
 .../hashicorp/go-getter/copy_dir.go | 6 +-
 .../hashicorp/go-getter/decompress.go | 25 +
 .../hashicorp/go-getter/decompress_tar.go | 37 +-
 .../hashicorp/go-getter/decompress_testing.go | 35 +-
 .../hashicorp/go-getter/decompress_zip.go | 5 +
 .../github.com/hashicorp/go-getter/detect.go | 1 +
 .../hashicorp/go-getter/detect_git.go | 26 +
 .../hashicorp/go-getter/detect_github.go | 26 -
 .../hashicorp/go-getter/detect_ssh.go | 49 +
 vendor/github.com/hashicorp/go-getter/get.go | 17 +-
 .../hashicorp/go-getter/get_base.go | 20 +
 .../hashicorp/go-getter/get_file.go | 6 +-
 .../hashicorp/go-getter/get_file_copy.go | 29 +
 .../hashicorp/go-getter/get_file_unix.go | 4 +-
 .../hashicorp/go-getter/get_file_windows.go | 24 +-
 .../github.com/hashicorp/go-getter/get_git.go | 97 +-
 .../github.com/hashicorp/go-getter/get_hg.go | 24 +-
 .../hashicorp/go-getter/get_http.go | 130 +-
 .../hashicorp/go-getter/get_mock.go | 2 +
 .../github.com/hashicorp/go-getter/get_s3.go | 17 +-
 vendor/github.com/hashicorp/go-getter/go.mod | 24 +
 vendor/github.com/hashicorp/go-getter/go.sum | 44 +
 .../go-getter/helper/url/url_windows.go | 15 +-
 .../github.com/hashicorp/go-getter/source.go | 23 +-
 .../github.com/hashicorp/go-hclog/.gitignore | 1 +
 .../github.com/hashicorp/go-hclog/README.md | 14 +-
 vendor/github.com/hashicorp/go-hclog/go.mod | 7 +
 vendor/github.com/hashicorp/go-hclog/go.sum | 6 +
 vendor/github.com/hashicorp/go-hclog/int.go | 172 +-
 vendor/github.com/hashicorp/go-hclog/log.go | 27 +-
 .../hashicorp/go-hclog/nulllogger.go | 47 +
 .../github.com/hashicorp/go-plugin/README.md | 22 +-
 .../github.com/hashicorp/go-plugin/client.go | 541 +-
 vendor/github.com/hashicorp/go-plugin/go.mod | 17 +
 vendor/github.com/hashicorp/go-plugin/go.sum | 31 +
 .../hashicorp/go-plugin/grpc_broker.go | 457 +
 .../hashicorp/go-plugin/grpc_client.go | 58 +-
 .../hashicorp/go-plugin/grpc_controller.go | 23 +
 .../hashicorp/go-plugin/grpc_server.go | 43 +-
 .../go-plugin/internal/plugin/gen.go | 3 +
 .../internal/plugin/grpc_broker.pb.go | 203 +
 .../internal/plugin/grpc_broker.proto | 15 +
 .../internal/plugin/grpc_controller.pb.go | 143 +
 .../internal/plugin/grpc_controller.proto | 11 +
 .../hashicorp/go-plugin/log_entry.go | 4 +-
 vendor/github.com/hashicorp/go-plugin/mtls.go | 73 +
 .../github.com/hashicorp/go-plugin/plugin.go | 8 +-
 .../github.com/hashicorp/go-plugin/server.go | 184 +-
 .../github.com/hashicorp/go-plugin/testing.go | 70 +-
 .../github.com/hashicorp/go-safetemp/LICENSE | 362 +
 .../hashicorp/go-safetemp/README.md | 10 +
 .../github.com/hashicorp/go-safetemp/go.mod | 1 +
 .../hashicorp/go-safetemp/safetemp.go | 40 +
 .../github.com/hashicorp/go-uuid/.travis.yml | 12 +
 vendor/github.com/hashicorp/go-uuid/README.md | 4 +-
 vendor/github.com/hashicorp/go-uuid/go.mod | 1 +
 vendor/github.com/hashicorp/go-uuid/uuid.go | 38 +-
 .../hashicorp/go-version/.travis.yml | 2 +
 .../hashicorp/go-version/constraint.go | 34 +-
 vendor/github.com/hashicorp/go-version/go.mod | 1 +
 .../hashicorp/go-version/version.go | 90 +-
 .../hashicorp/hcl2/ext/dynblock/README.md | 184 +
 .../hcl2/ext/dynblock/expand_body.go | 252 +
 .../hcl2/ext/dynblock/expand_spec.go | 215 +
 .../hashicorp/hcl2/ext/dynblock/expr_wrap.go | 42 +
 .../hashicorp/hcl2/ext/dynblock/iteration.go | 66 +
 .../hashicorp/hcl2/ext/dynblock/public.go | 44 +
 .../hashicorp/hcl2/ext/dynblock/schema.go | 50 +
 .../hashicorp/hcl2/ext/dynblock/variables.go | 209 +
 .../hcl2/ext/dynblock/variables_hcldec.go | 43 +
 .../hashicorp/hcl2/ext/typeexpr/README.md | 67 +
 .../hashicorp/hcl2/ext/typeexpr/doc.go | 11 +
 .../hashicorp/hcl2/ext/typeexpr/get_type.go | 196 +
 .../hashicorp/hcl2/ext/typeexpr/public.go | 129 +
 vendor/github.com/hashicorp/hcl2/gohcl/doc.go | 4 +
 .../github.com/hashicorp/hcl2/gohcl/encode.go | 191 +
 .../hashicorp/hcl2/hcl/diagnostic.go | 42 +-
 .../hashicorp/hcl2/hcl/diagnostic_text.go | 143 +
 .../hashicorp/hcl2/hcl/expr_call.go | 46 +
 .../hcl2/hcl/hclsyntax/diagnostics.go | 23 +
 .../hcl2/hcl/hclsyntax/expression.go | 471 +-
 .../hcl2/hcl/hclsyntax/expression_ops.go | 62 +-
 .../hcl2/hcl/hclsyntax/expression_template.go | 28 +-
 .../hcl2/hcl/hclsyntax/navigation.go | 18 +
 .../hashicorp/hcl2/hcl/hclsyntax/node.go | 2 +-
 .../hashicorp/hcl2/hcl/hclsyntax/parser.go | 477 +-
 .../hcl2/hcl/hclsyntax/parser_template.go | 79 +-
 .../hcl2/hcl/hclsyntax/scan_string_lit.go | 24 +-
 .../hcl2/hcl/hclsyntax/scan_tokens.go | 7172 +++---
 .../hcl2/hcl/hclsyntax/scan_tokens.rl | 57 +-
 .../hashicorp/hcl2/hcl/hclsyntax/spec.md | 193 +-
 .../hashicorp/hcl2/hcl/hclsyntax/structure.go | 47 +-
 .../hcl2/hcl/hclsyntax/structure_at_pos.go | 118 +
 .../hashicorp/hcl2/hcl/hclsyntax/token.go | 66 +-
 .../hcl2/hcl/hclsyntax/token_type_string.go | 97 +-
 .../hashicorp/hcl2/hcl/hclsyntax/variables.go | 6 +-
 .../hashicorp/hcl2/hcl/hclsyntax/walk.go | 44 +-
 .../hashicorp/hcl2/hcl/json/parser.go | 31 +-
 .../hashicorp/hcl2/hcl/json/spec.md | 65 +-
 .../hashicorp/hcl2/hcl/json/structure.go | 88 +-
 .../github.com/hashicorp/hcl2/hcl/merged.go | 14 +-
 vendor/github.com/hashicorp/hcl2/hcl/ops.go | 141 +
 vendor/github.com/hashicorp/hcl2/hcl/pos.go | 13 +
 .../hashicorp/hcl2/hcl/pos_scanner.go | 18 +-
 vendor/github.com/hashicorp/hcl2/hcl/spec.md | 137 +-
 .../hashicorp/hcl2/hcl/structure_at_pos.go | 117 +
 .../hashicorp/hcl2/hcl/traversal.go | 61 +-
 .../hashicorp/hcl2/hcl/traversal_for_expr.go | 5 +-
 .../hashicorp/hcl2/hcldec/public.go | 5 +-
 .../github.com/hashicorp/hcl2/hcldec/spec.go | 571 +-
 .../hashicorp/hcl2/hcldec/variables.go | 12 +-
 vendor/github.com/hashicorp/hcl2/hcled/doc.go | 4 +
 .../hashicorp/hcl2/hcled/navigation.go | 34 +
 .../github.com/hashicorp/hcl2/hclwrite/ast.go | 121 +
 .../hashicorp/hcl2/hclwrite/ast_attribute.go | 48 +
 .../hashicorp/hcl2/hclwrite/ast_block.go | 74 +
 .../hashicorp/hcl2/hclwrite/ast_body.go | 153 +
 .../hashicorp/hcl2/hclwrite/ast_expression.go | 201 +
 .../github.com/hashicorp/hcl2/hclwrite/doc.go | 11 +
 .../hashicorp/hcl2/hclwrite/format.go | 492 +
 .../hashicorp/hcl2/hclwrite/generate.go | 250 +
 .../hcl2/hclwrite/native_node_sorter.go | 23 +
 .../hashicorp/hcl2/hclwrite/node.go | 236 +
 .../hashicorp/hcl2/hclwrite/parser.go | 594 +
 .../hashicorp/hcl2/hclwrite/public.go | 44 +
 .../hashicorp/hcl2/hclwrite/tokens.go | 122 +
 vendor/github.com/hashicorp/hil/convert.go | 19 +-
 vendor/github.com/hashicorp/hil/go.mod | 6 +
 vendor/github.com/hashicorp/hil/go.sum | 4 +
 .../hashicorp/hil/scanner/scanner.go | 6 +
 vendor/github.com/hashicorp/logutils/go.mod | 1 +
 .../terraform-config-inspect/LICENSE | 353 +
 .../tfconfig/diagnostic.go | 138 +
 .../terraform-config-inspect/tfconfig/doc.go | 21 +
 .../terraform-config-inspect/tfconfig/load.go | 130 +
 .../tfconfig/load_hcl.go | 322 +
 .../tfconfig/load_legacy.go | 325 +
 .../tfconfig/module.go | 35 +
 .../tfconfig/module_call.go | 11 +
 .../tfconfig/output.go | 9 +
 .../tfconfig/provider_ref.go | 9 +
 .../tfconfig/resource.go | 64 +
 .../tfconfig/schema.go | 106 +
 .../tfconfig/source_pos.go | 50 +
 .../tfconfig/variable.go | 16 +
 .../hashicorp/terraform/addrs/count_attr.go | 12 +
 .../hashicorp/terraform/addrs/doc.go | 17 +
 .../terraform/addrs/input_variable.go | 41 +
 .../hashicorp/terraform/addrs/instance_key.go | 123 +
 .../hashicorp/terraform/addrs/local_value.go | 48 +
 .../hashicorp/terraform/addrs/module.go | 75 +
 .../hashicorp/terraform/addrs/module_call.go | 81 +
 .../terraform/addrs/module_instance.go | 415 +
 .../hashicorp/terraform/addrs/output_value.go | 75 +
 .../hashicorp/terraform/addrs/parse_ref.go | 338 +
 .../hashicorp/terraform/addrs/parse_target.go | 318 +
 .../hashicorp/terraform/addrs/path_attr.go | 12 +
 .../terraform/addrs/provider_config.go | 297 +
 .../terraform/addrs/referenceable.go | 20 +
 .../hashicorp/terraform/addrs/resource.go | 270 +
 .../terraform/addrs/resource_phase.go | 105 +
 .../terraform/addrs/resourcemode_string.go | 24 +
 .../hashicorp/terraform/addrs/self.go | 14 +
 .../hashicorp/terraform/addrs/targetable.go | 26 +
 .../terraform/addrs/terraform_attr.go | 12 +
 .../terraform/command/format/diagnostic.go | 295 +
 .../terraform/command/format/diff.go | 1192 +
 .../terraform/command/format/format.go | 8 +
 .../terraform/command/format/object_id.go | 123 +
 .../terraform/command/format/plan.go | 302 +
 .../terraform/command/format/state.go | 286 +
 .../config/configschema/decoder_spec.go | 97 -
 .../config/configschema/implied_type.go | 21 -
 .../config/configschema/nestingmode_string.go | 16 -
 .../terraform/config/hcl2shim/flatmap.go | 424 +
 .../terraform/config/hcl2shim/paths.go | 276 +
 .../terraform/config/hcl2shim/values.go | 109 +-
 .../terraform/config/hcl2shim/values_equiv.go | 214 +
 .../terraform/config/interpolate_funcs.go | 34 +
 .../terraform/config/module/storage.go | 4 +-
 .../hashicorp/terraform/configs/backend.go | 55 +
 .../terraform/configs/compat_shim.go | 116 +
 .../hashicorp/terraform/configs/config.go | 205 +
 .../terraform/configs/config_build.go | 179 +
 .../terraform/configs/configload/copy_dir.go | 125 +
 .../terraform/configs/configload/doc.go | 4 +
 .../terraform/configs/configload/getter.go | 150 +
 .../terraform/configs/configload/inode.go | 21 +
 .../configs/configload/inode_freebsd.go | 21 +
 .../configs/configload/inode_windows.go | 8 +
 .../terraform/configs/configload/loader.go | 150 +
 .../configs/configload/loader_load.go | 97 +
 .../configs/configload/loader_snapshot.go | 504 +
 .../configs/configload/module_mgr.go | 76 +
 .../configs/configload/source_addr.go | 45 +
 .../terraform/configs/configload/testing.go | 43 +
 .../configs/configschema/coerce_value.go | 274 +
 .../configs/configschema/decoder_spec.go | 117 +
 .../{config => configs}/configschema/doc.go | 0
 .../configs/configschema/empty_value.go | 59 +
 .../configs/configschema/implied_type.go | 42 +
 .../configschema/internal_validate.go | 13 +
 .../configschema/nestingmode_string.go | 28 +
 .../configs/configschema/none_required.go | 38 +
 .../configschema/schema.go | 23 +
 .../configschema/validate_traversal.go | 173 +
 .../hashicorp/terraform/configs/depends_on.go | 23 +
 .../hashicorp/terraform/configs/doc.go | 19 +
 .../hashicorp/terraform/configs/module.go | 404 +
 .../terraform/configs/module_call.go | 188 +
 .../terraform/configs/module_merge.go | 247 +
 .../terraform/configs/module_merge_body.go | 143 +
 .../terraform/configs/named_values.go | 364 +
 .../hashicorp/terraform/configs/parser.go | 100 +
 .../terraform/configs/parser_config.go | 247 +
 .../terraform/configs/parser_config_dir.go | 142 +
 .../terraform/configs/parser_values.go | 43 +
 .../hashicorp/terraform/configs/provider.go | 144 +
 .../terraform/configs/provisioner.go | 150 +
 .../configs/provisioneronfailure_string.go | 16 +
 .../configs/provisionerwhen_string.go | 16 +
 .../hashicorp/terraform/configs/resource.go | 486 +
 .../hashicorp/terraform/configs/synth_body.go | 118 +
 .../hashicorp/terraform/configs/util.go | 63 +
 .../terraform/configs/variable_type_hint.go | 45 +
 .../configs/variabletypehint_string.go | 29 +
 .../terraform/configs/version_constraint.go | 64 +
 .../github.com/hashicorp/terraform/dag/dag.go | 10 +-
 .../hashicorp/terraform/dag/walk.go | 94 +-
 .../helper/didyoumean/name_suggestion.go | 24 +
 .../hashicorp/terraform/helper/plugin/doc.go | 6 +
 .../terraform/helper/plugin/grpc_provider.go | 1311 +
 .../helper/plugin/grpc_provisioner.go | 147 +
 .../terraform/helper/plugin/unknown.go | 131 +
 .../helper/resource/grpc_test_provider.go | 43 +
 .../terraform/helper/resource/state.go | 2 +-
 .../terraform/helper/resource/state_shim.go | 163 +
 .../terraform/helper/resource/testing.go | 298 +-
 .../helper/resource/testing_config.go | 334 +-
 .../helper/resource/testing_import_state.go | 113 +-
 .../terraform/helper/schema/backend.go | 138 +-
 .../terraform/helper/schema/core_schema.go | 200 +-
 .../terraform/helper/schema/field_reader.go | 8 +
 .../helper/schema/field_reader_diff.go | 3 +
 .../helper/schema/field_reader_map.go | 3 +
 .../helper/schema/field_writer_map.go | 12 +-
 .../terraform/helper/schema/provider.go | 22 +-
 .../terraform/helper/schema/provisioner.go | 10 +-
 .../terraform/helper/schema/resource.go | 270 +-
 .../terraform/helper/schema/resource_data.go | 20 +
 .../terraform/helper/schema/resource_diff.go | 2 +-
 .../helper/schema/resource_timeout.go | 98 +-
 .../terraform/helper/schema/schema.go | 329 +-
 .../hashicorp/terraform/helper/schema/set.go | 10 +
 .../terraform/helper/schema/shims.go | 157 +
 .../terraform/helper/schema/testing.go | 2 +-
 .../terraform/helper/validation/validation.go | 54 +
 .../terraform/internal/earlyconfig/config.go | 123 +
 .../internal/earlyconfig/config_build.go | 144 +
 .../internal/earlyconfig/diagnostics.go | 78 +
 .../terraform/internal/earlyconfig/doc.go | 20 +
 .../terraform/internal/earlyconfig/module.go | 13 +
 .../terraform/internal/initwd/copy_dir.go | 125 +
 .../terraform/internal/initwd/doc.go | 7 +
 .../terraform/internal/initwd/from_module.go | 363 +
 .../terraform/internal/initwd/getter.go | 210 +
 .../terraform/internal/initwd/inode.go | 21 +
 .../internal/initwd/inode_freebsd.go | 21 +
 .../internal/initwd/inode_windows.go | 8 +
 .../terraform/internal/initwd/load_config.go | 56 +
 .../internal/initwd/module_install.go | 538 +
 .../internal/initwd/module_install_hooks.go | 36 +
 .../terraform/internal/initwd/testing.go | 73 +
 .../internal/initwd/version_required.go | 83 +
 .../terraform/internal/modsdir/doc.go | 3 +
 .../terraform/internal/modsdir/manifest.go | 138 +
 .../terraform/internal/modsdir/paths.go | 3 +
 .../terraform/internal/tfplugin5/generate.sh | 16 +
 .../internal/tfplugin5/tfplugin5.pb.go | 3455 +++
 .../internal/tfplugin5/tfplugin5.proto | 351 +
 .../terraform/lang/blocktoattr/doc.go | 5 +
 .../terraform/lang/blocktoattr/fixup.go | 187 +
 .../terraform/lang/blocktoattr/schema.go | 145 +
 .../terraform/lang/blocktoattr/variables.go | 43 +
 .../hashicorp/terraform/lang/data.go | 33 +
 .../hashicorp/terraform/lang/doc.go | 5 +
 .../hashicorp/terraform/lang/eval.go | 477 +
 .../hashicorp/terraform/lang/funcs/cidr.go | 129 +
 .../terraform/lang/funcs/collection.go | 1418 ++
 .../terraform/lang/funcs/conversion.go | 87 +
 .../hashicorp/terraform/lang/funcs/crypto.go | 285 +
 .../terraform/lang/funcs/datetime.go | 70 +
 .../terraform/lang/funcs/encoding.go | 140 +
 .../terraform/lang/funcs/filesystem.go | 345 +
 .../hashicorp/terraform/lang/funcs/number.go | 155 +
 .../hashicorp/terraform/lang/funcs/string.go | 280 +
 .../hashicorp/terraform/lang/functions.go | 147 +
 .../hashicorp/terraform/lang/references.go | 81 +
 .../hashicorp/terraform/lang/scope.go | 34 +
 .../hashicorp/terraform/plans/action.go | 22 +
 .../terraform/plans/action_string.go | 36 +
 .../hashicorp/terraform/plans/changes.go | 308 +
 .../hashicorp/terraform/plans/changes_src.go | 190 +
 .../terraform/plans/changes_state.go | 15 +
 .../hashicorp/terraform/plans/changes_sync.go | 144 +
 .../hashicorp/terraform/plans/doc.go | 5 +
 .../terraform/plans/dynamic_value.go | 96 +
 .../terraform/plans/objchange/all_null.go | 18 +
 .../terraform/plans/objchange/compatible.go | 376 +
 .../terraform/plans/objchange/doc.go | 4 +
 .../terraform/plans/objchange/lcs.go | 104 +
 .../plans/objchange/normalize_obj.go | 132 +
 .../terraform/plans/objchange/objchange.go | 390 +
 .../terraform/plans/objchange/plan_valid.go | 267 +
 .../hashicorp/terraform/plans/plan.go | 92 +
 .../hashicorp/terraform/plugin/client.go | 12 +-
 .../terraform/plugin/convert/diagnostics.go | 132 +
 .../terraform/plugin/convert/schema.go | 154 +
 .../terraform/plugin/discovery/error.go | 34 +
 .../terraform/plugin/discovery/get.go | 607 +-
 .../terraform/plugin/discovery/hashicorp.go | 34 +
 .../terraform/plugin/discovery/meta_set.go | 2 +-
 .../plugin/discovery/requirements.go | 6 +
 .../terraform/plugin/discovery/signature.go | 42 +-
 .../terraform/plugin/discovery/version.go | 5 +
 .../terraform/plugin/discovery/version_set.go | 5 +
 .../terraform/plugin/grpc_provider.go | 563 +
 .../terraform/plugin/grpc_provisioner.go | 178 +
 .../hashicorp/terraform/plugin/plugin.go | 9 +-
 .../terraform/plugin/resource_provider.go | 7 +-
 .../terraform/plugin/resource_provisioner.go | 13 +-
 .../hashicorp/terraform/plugin/serve.go | 87 +-
 .../hashicorp/terraform/plugin/ui_input.go | 7 +-
 .../terraform/providers/addressed_types.go | 47 +
 .../hashicorp/terraform/providers/doc.go | 3 +
 .../hashicorp/terraform/providers/provider.go | 351 +
 .../hashicorp/terraform/providers/resolver.go | 112 +
 .../hashicorp/terraform/provisioners/doc.go | 3 +
 .../terraform/provisioners/factory.go | 19 +
 .../terraform/provisioners/provisioner.go | 82 +
 .../hashicorp/terraform/registry/client.go | 156 +-
 .../hashicorp/terraform/registry/errors.go | 40 +
 .../registry/regsrc/terraform_provider.go | 60 +
 .../terraform/registry/response/provider.go | 36 +
 .../registry/response/provider_list.go | 7 +
 .../registry/response/terraform_provider.go | 96 +
 .../hashicorp/terraform/states/doc.go | 3 +
 .../terraform/states/eachmode_string.go | 26 +
 .../terraform/states/instance_generation.go | 24 +
 .../terraform/states/instance_object.go | 120 +
 .../terraform/states/instance_object_src.go | 113 +
 .../hashicorp/terraform/states/module.go | 285 +
 .../terraform/states/objectstatus_string.go | 24 +
 .../terraform/states/output_value.go | 14 +
 .../hashicorp/terraform/states/resource.go | 239 +
 .../hashicorp/terraform/states/state.go | 229 +
 .../terraform/states/state_deepcopy.go | 218 +
 .../hashicorp/terraform/states/state_equal.go | 18 +
 .../terraform/states/state_string.go | 279 +
 .../terraform/states/statefile/diagnostics.go | 62 +
 .../terraform/states/statefile/doc.go | 3 +
 .../terraform/states/statefile/file.go | 62 +
 .../states/statefile/marshal_equal.go | 40 +
 .../terraform/states/statefile/read.go | 209 +
 .../terraform/states/statefile/version0.go | 23 +
 .../terraform/states/statefile/version1.go | 174 +
 .../states/statefile/version1_upgrade.go | 172 +
 .../terraform/states/statefile/version2.go | 209 +
 .../states/statefile/version2_upgrade.go | 145 +
 .../terraform/states/statefile/version3.go | 50 +
 .../states/statefile/version3_upgrade.go | 415 +
 .../terraform/states/statefile/version4.go | 604 +
 .../terraform/states/statefile/write.go | 12 +
 .../hashicorp/terraform/states/sync.go | 537 +
 .../terraform/svchost/disco/disco.go | 178 +-
 .../hashicorp/terraform/svchost/disco/host.go | 265 +-
 .../hashicorp/terraform/terraform/context.go | 793 +-
 .../terraform/terraform/context_components.go | 19 +-
 .../terraform/terraform/context_graph_type.go | 4 +-
 .../terraform/terraform/context_import.go | 56 +-
 .../terraform/terraform/context_input.go | 251 +
 .../hashicorp/terraform/terraform/debug.go | 523 -
 .../hashicorp/terraform/terraform/diff.go | 573 +-
 .../hashicorp/terraform/terraform/eval.go | 17 +-
 .../terraform/terraform/eval_apply.go | 596 +-
 .../terraform/eval_check_prevent_destroy.go | 41 +-
 .../terraform/terraform/eval_context.go | 122 +-
 .../terraform/eval_context_builtin.go | 292 +-
 .../terraform/terraform/eval_context_mock.go | 255 +-
 .../terraform/terraform/eval_count.go | 147 +-
 .../terraform/eval_count_boundary.go | 95 +-
 .../terraform/terraform/eval_diff.go | 906 +-
 .../terraform/terraform/eval_import_state.go | 65 +-
 .../terraform/terraform/eval_interpolate.go | 56 -
 .../terraform/terraform/eval_lang.go | 61 +
 .../terraform/terraform/eval_local.go | 84 +-
 .../terraform/terraform/eval_output.go | 165 +-
 .../terraform/terraform/eval_provider.go | 172 +-
 .../terraform/terraform/eval_provisioner.go | 10 +-
 .../terraform/terraform/eval_read_data.go | 475 +-
 .../terraform/terraform/eval_refresh.go | 79 +-
 .../terraform/terraform/eval_resource.go | 13 -
 .../terraform/terraform/eval_sequence.go | 19 +-
 .../terraform/terraform/eval_state.go | 552 +-
 .../terraform/terraform/eval_state_upgrade.go | 115 +
 .../terraform/terraform/eval_validate.go | 591 +-
 .../terraform/eval_validate_selfref.go | 105 +-
 .../terraform/terraform/eval_variable.go | 219 +-
 .../terraform/terraform/evaltree_provider.go | 70 +-
 .../hashicorp/terraform/terraform/evaluate.go | 933 +
 .../terraform/terraform/evaluate_valid.go | 299 +
 .../hashicorp/terraform/terraform/graph.go | 97 +-
 .../terraform/terraform/graph_builder.go | 40 +-
 .../terraform/graph_builder_apply.go | 116 +-
 .../terraform/graph_builder_destroy_plan.go | 58 +-
 .../terraform/terraform/graph_builder_eval.go | 108 +
 .../terraform/graph_builder_import.go | 48 +-
 .../terraform/graph_builder_input.go | 27 -
 .../terraform/terraform/graph_builder_plan.go | 99 +-
 .../terraform/graph_builder_refresh.go | 89 +-
 .../terraform/graph_builder_validate.go | 4 +-
 .../terraform/graph_interface_subgraph.go | 6 +-
 .../terraform/terraform/graph_walk.go | 48 +-
 .../terraform/terraform/graph_walk_context.go | 161 +-
 .../terraform/graph_walk_operation.go | 2 +-
 .../terraform/terraform/graphtype_string.go | 4 +-
 .../hashicorp/terraform/terraform/hook.go | 112 +-
 .../terraform/terraform/hook_mock.go | 287 +-
 .../terraform/terraform/hook_stop.go | 41 +-
 .../terraform/terraform/interpolate.go | 86 +-
 .../terraform/module_dependencies.go | 139 +-
 .../terraform/node_count_boundary.go | 18 +-
 .../terraform/terraform/node_data_destroy.go | 42 +-
 .../terraform/terraform/node_data_refresh.go | 234 +-
 .../terraform/terraform/node_local.go | 68 +-
 .../terraform/node_module_removed.go | 88 +-
 .../terraform/node_module_variable.go | 162 +-
 .../terraform/terraform/node_output.go | 179 +-
 .../terraform/terraform/node_output_orphan.go | 32 +-
 .../terraform/node_provider_abstract.go | 59 +-
 .../terraform/node_provider_disabled.go | 29 +-
 .../terraform/terraform/node_provider_eval.go | 20 +
 .../terraform/terraform/node_provisioner.go | 15 +-
 .../terraform/node_resource_abstract.go | 437 +-
 .../terraform/node_resource_abstract_count.go | 50 -
 .../terraform/node_resource_apply.go | 423 +-
 .../terraform/node_resource_apply_instance.go | 411 +
 .../terraform/node_resource_destroy.go | 358 +-
 .../node_resource_destroy_deposed.go | 313 +
 .../terraform/terraform/node_resource_plan.go | 110 +-
 .../terraform/node_resource_plan_destroy.go | 89 +-
 .../terraform/node_resource_plan_instance.go | 225 +-
 .../terraform/node_resource_plan_orphan.go | 86 +-
 .../terraform/node_resource_refresh.go | 239 +-
 .../terraform/node_resource_validate.go | 191 +-
 .../terraform/terraform/node_root_variable.go | 38 +-
 .../hashicorp/terraform/terraform/path.go | 15 +-
 .../hashicorp/terraform/terraform/plan.go | 130 +-
 .../terraform/terraform/provider_mock.go | 496 +
 .../terraform/terraform/provisioner_mock.go | 154 +
 .../hashicorp/terraform/terraform/resource.go | 222 +-
 .../terraform/terraform/resource_address.go | 175 +-
 .../terraform/terraform/resource_provider.go | 55 +-
 .../terraform/resource_provisioner.go | 16 +
 .../terraform/resource_provisioner_mock.go | 17 +-
 .../hashicorp/terraform/terraform/schemas.go | 256 +-
 .../terraform/terraform/semantics.go | 132 -
 .../hashicorp/terraform/terraform/state.go | 415 +-
 .../terraform/terraform/state_add.go | 374 -
 .../terraform/terraform/transform.go | 11 +-
 .../transform_attach_config_provider.go | 7 +-
 .../transform_attach_config_resource.go | 54 +-
 .../terraform/transform_attach_schema.go | 99 +
 .../terraform/transform_attach_state.go | 56 +-
 .../terraform/terraform/transform_config.go | 80 +-
 .../terraform/transform_config_flat.go | 49 +-
 .../terraform/transform_count_boundary.go | 9 +-
 .../terraform/terraform/transform_deposed.go | 178 -
 .../terraform/transform_destroy_cbd.go | 185 +-
 .../terraform/transform_destroy_edge.go | 108 +-
 .../terraform/terraform/transform_diff.go | 220 +-
 .../terraform/transform_import_provider.go | 22 +-
 .../terraform/transform_import_state.go | 203 +-
 .../terraform/terraform/transform_local.go | 30 +-
 .../terraform/transform_module_variable.go | 152 +-
 .../terraform/transform_orphan_count.go | 139 +-
 .../terraform/transform_orphan_output.go | 31 +-
 .../terraform/transform_orphan_resource.go | 167 +-
 .../terraform/terraform/transform_output.go | 42 +-
 .../terraform/terraform/transform_provider.go | 489 +-
 .../terraform/transform_provisioner.go | 23 +-
 .../terraform/transform_reference.go | 354 +-
 .../terraform/transform_removed_modules.go | 17 +-
 .../terraform/transform_resource_count.go | 45 +-
 .../terraform/terraform/transform_state.go | 77 +-
 .../terraform/terraform/transform_targets.go | 81 +-
 .../terraform/terraform/transform_variable.go | 22 +-
 .../hashicorp/terraform/terraform/ui_input.go | 4 +-
 .../terraform/terraform/ui_input_mock.go | 4 +-
 .../terraform/terraform/ui_input_prefix.go | 5 +-
 .../terraform/ui_output_provisioner.go | 12 +-
 .../terraform/valuesourcetype_string.go | 44 +
 .../terraform/terraform/variables.go | 409 +-
 .../terraform/terraform/version_required.go | 97 +-
 .../terraform/walkoperation_string.go | 4 +-
 .../terraform/tfdiags/config_traversals.go | 68 +
 .../hashicorp/terraform/tfdiags/contextual.go | 372 +
 .../hashicorp/terraform/tfdiags/diagnostic.go | 14 +
 .../terraform/tfdiags/diagnostic_base.go | 31 +
 .../terraform/tfdiags/diagnostics.go | 149 +
 .../hashicorp/terraform/tfdiags/error.go | 7 +-
 .../hashicorp/terraform/tfdiags/hcl.go | 10 +
 .../terraform/tfdiags/rpc_friendly.go | 6 +
 .../terraform/tfdiags/simple_warning.go | 7 +-
 .../hashicorp/terraform/tfdiags/sourceless.go | 13 +
 .../hashicorp/terraform/version/version.go | 10 +-
 vendor/github.com/hashicorp/yamux/session.go | 65 +-
 vendor/github.com/hashicorp/yamux/stream.go | 45 +-
 vendor/github.com/hashicorp/yamux/util.go | 15 +
 vendor/github.com/jmespath/go-jmespath/api.go | 2 +-
 .../mattn/go-colorable/colorable_windows.go | 208 +-
 vendor/github.com/mattn/go-colorable/go.mod | 3 +
 vendor/github.com/mattn/go-colorable/go.sum | 4 +
 vendor/github.com/mattn/go-isatty/.travis.yml | 4 +
 vendor/github.com/mattn/go-isatty/go.mod | 3 +
 vendor/github.com/mattn/go-isatty/go.sum | 2 +
 .../mattn/go-isatty/isatty_appengine.go | 15 -
 .../github.com/mattn/go-isatty/isatty_bsd.go | 6 +
 .../mattn/go-isatty/isatty_linux.go | 20 +-
 .../mattn/go-isatty/isatty_linux_ppc64x.go | 19 -
 .../mattn/go-isatty/isatty_others.go | 9 +-
 .../mattn/go-isatty/isatty_solaris.go | 6 +
 .../mitchellh/colorstring/.travis.yml | 15 +
 .../github.com/mitchellh/colorstring/LICENSE | 21 +
 .../mitchellh/colorstring/README.md | 30 +
 .../mitchellh/colorstring/colorstring.go | 244 +
 .../github.com/mitchellh/colorstring/go.mod | 1 +
 vendor/github.com/oklog/run/.gitignore | 14 +
 vendor/github.com/oklog/run/.travis.yml | 12 +
 vendor/github.com/oklog/run/LICENSE | 201 +
 vendor/github.com/oklog/run/README.md | 73 +
 vendor/github.com/oklog/run/group.go | 62 +
 vendor/github.com/posener/complete/.gitignore | 2 +
 .../github.com/posener/complete/.travis.yml | 7 +-
 vendor/github.com/posener/complete/args.go | 9 +
 vendor/github.com/posener/complete/cmd/cmd.go | 2 +-
 .../posener/complete/cmd/install/fish.go | 18 +-
 .../posener/complete/cmd/install/install.go | 2 +-
 .../posener/complete/cmd/install/utils.go | 5 +-
 .../github.com/posener/complete/complete.go | 30 +-
 vendor/github.com/posener/complete/go.mod | 3 +
 vendor/github.com/posener/complete/go.sum | 4 +
 vendor/github.com/posener/complete/log.go | 3 +-
 .../posener/complete/metalinter.json | 21 -
 vendor/github.com/posener/complete/readme.md | 1 +
 vendor/github.com/spf13/afero/.travis.yml | 21 +
 vendor/github.com/spf13/afero/LICENSE.txt | 174 +
 vendor/github.com/spf13/afero/README.md | 452 +
 vendor/github.com/spf13/afero/afero.go | 108 +
 vendor/github.com/spf13/afero/appveyor.yml | 15 +
 vendor/github.com/spf13/afero/basepath.go | 180 +
 .../github.com/spf13/afero/cacheOnReadFs.go | 290 +
 vendor/github.com/spf13/afero/const_bsds.go | 22 +
 .../github.com/spf13/afero/const_win_unix.go | 25 +
 .../github.com/spf13/afero/copyOnWriteFs.go | 293 +
 vendor/github.com/spf13/afero/go.mod | 3 +
 vendor/github.com/spf13/afero/go.sum | 2 +
 vendor/github.com/spf13/afero/httpFs.go | 110 +
 vendor/github.com/spf13/afero/ioutil.go | 230 +
 vendor/github.com/spf13/afero/lstater.go | 27 +
 vendor/github.com/spf13/afero/match.go | 110 +
 vendor/github.com/spf13/afero/mem/dir.go | 37 +
 vendor/github.com/spf13/afero/mem/dirmap.go | 43 +
 vendor/github.com/spf13/afero/mem/file.go | 317 +
 vendor/github.com/spf13/afero/memmap.go | 365 +
 vendor/github.com/spf13/afero/os.go | 101 +
 vendor/github.com/spf13/afero/path.go | 106 +
 vendor/github.com/spf13/afero/readonlyfs.go | 80 +
 vendor/github.com/spf13/afero/regexpfs.go | 214 +
 vendor/github.com/spf13/afero/unionFile.go | 316 +
 vendor/github.com/spf13/afero/util.go | 330 +
 vendor/github.com/ulikunitz/xz/README.md | 72 +-
 vendor/github.com/ulikunitz/xz/TODO.md | 14 +-
 .../github.com/ulikunitz/xz/lzma/encoder.go | 2 +-
 .../vmihailenco/msgpack/.travis.yml | 17 +
 .../vmihailenco/msgpack/CHANGELOG.md | 24 +
 vendor/github.com/vmihailenco/msgpack/LICENSE | 25 +
 .../github.com/vmihailenco/msgpack/Makefile | 5 +
 .../github.com/vmihailenco/msgpack/README.md | 69 +
 .../vmihailenco/msgpack/appengine.go | 64 +
 .../vmihailenco/msgpack/codes/codes.go | 86 +
 .../github.com/vmihailenco/msgpack/decode.go | 546 +
 .../vmihailenco/msgpack/decode_map.go | 338 +
 .../vmihailenco/msgpack/decode_number.go | 307 +
 .../vmihailenco/msgpack/decode_query.go | 158 +
 .../vmihailenco/msgpack/decode_slice.go | 193 +
 .../vmihailenco/msgpack/decode_string.go | 175 +
 .../vmihailenco/msgpack/decode_value.go | 234 +
 .../github.com/vmihailenco/msgpack/encode.go | 170 +
 .../vmihailenco/msgpack/encode_map.go | 172 +
 .../vmihailenco/msgpack/encode_number.go | 230 +
 .../vmihailenco/msgpack/encode_slice.go | 124 +
 .../vmihailenco/msgpack/encode_value.go | 167 +
 vendor/github.com/vmihailenco/msgpack/ext.go | 238 +
 .../github.com/vmihailenco/msgpack/msgpack.go | 17 +
 vendor/github.com/vmihailenco/msgpack/tag.go | 42 +
 vendor/github.com/vmihailenco/msgpack/time.go | 145 +
 .../github.com/vmihailenco/msgpack/types.go | 310 +
 .../github.com/zclconf/go-cty/cty/capsule.go | 2 +-
 .../zclconf/go-cty/cty/convert/conversion.go | 23 +
 .../cty/convert/conversion_collection.go | 114 +
 .../go-cty/cty/convert/conversion_object.go | 76 +
 .../cty/convert/conversion_primitive.go | 6 +-
 .../go-cty/cty/convert/conversion_tuple.go | 71 +
 .../go-cty/cty/convert/mismatch_msg.go | 220 +
 .../zclconf/go-cty/cty/convert/public.go | 4 +-
 .../zclconf/go-cty/cty/convert/unify.go | 254 +-
 .../zclconf/go-cty/cty/function/doc.go | 2 +-
 .../zclconf/go-cty/cty/function/function.go | 4 +-
 .../go-cty/cty/function/stdlib/datetime.go | 385 +
 .../go-cty/cty/function/stdlib/format_fsm.go | 74 +-
 .../go-cty/cty/function/stdlib/format_fsm.rl | 18 +-
 .../go-cty/cty/function/unpredictable.go | 31 +
 vendor/github.com/zclconf/go-cty/cty/gob.go | 4 +-
 .../github.com/zclconf/go-cty/cty/gocty/in.go | 20 +
 .../zclconf/go-cty/cty/json/type_implied.go | 7 +-
 .../zclconf/go-cty/cty/json/unmarshal.go | 2 +-
 .../zclconf/go-cty/cty/list_type.go | 10 +-
 .../github.com/zclconf/go-cty/cty/map_type.go | 10 +-
 .../zclconf/go-cty/cty/msgpack/doc.go | 14 +
 .../zclconf/go-cty/cty/msgpack/dynamic.go | 31 +
 .../zclconf/go-cty/cty/msgpack/infinity.go | 8 +
 .../zclconf/go-cty/cty/msgpack/marshal.go | 207 +
 .../go-cty/cty/msgpack/type_implied.go | 167 +
 .../zclconf/go-cty/cty/msgpack/unknown.go | 16 +
 .../zclconf/go-cty/cty/msgpack/unmarshal.go | 334 +
 .../zclconf/go-cty/cty/object_type.go | 2 +-
 vendor/github.com/zclconf/go-cty/cty/path.go | 22 +-
 .../github.com/zclconf/go-cty/cty/path_set.go | 198 +
 .../zclconf/go-cty/cty/primitive_type.go | 2 +-
 .../zclconf/go-cty/cty/set_internals.go | 18 +-
 .../github.com/zclconf/go-cty/cty/set_type.go | 10 +-
 .../zclconf/go-cty/cty/tuple_type.go | 2 +-
 vendor/github.com/zclconf/go-cty/cty/type.go | 29 +-
 .../zclconf/go-cty/cty/type_conform.go | 29 +-
 .../github.com/zclconf/go-cty/cty/unknown.go | 9 +-
 .../zclconf/go-cty/cty/unknown_as_null.go | 64 +
 .../zclconf/go-cty/cty/value_init.go | 26 +
 .../zclconf/go-cty/cty/value_ops.go | 76 +-
 vendor/github.com/zclconf/go-cty/cty/walk.go | 182 +
 vendor/golang.org/x/crypto/blowfish/cipher.go | 8 +
 vendor/golang.org/x/crypto/cast5/cast5.go | 11 +-
 .../x/crypto/curve25519/curve25519.go | 2 +-
 .../x/crypto/internal/chacha20/asm_arm64.s | 308 +
 .../crypto/internal/chacha20/chacha_arm64.go | 31 +
 .../crypto/internal/chacha20/chacha_noasm.go | 2 +-
 .../crypto/internal/chacha20/chacha_s390x.go | 11 +-
 .../x/crypto/internal/chacha20/chacha_s390x.s | 23 -
 vendor/golang.org/x/crypto/openpgp/write.go | 2 +
 vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 7 +-
 .../golang.org/x/crypto/poly1305/mac_noasm.go | 11 +
 .../golang.org/x/crypto/poly1305/poly1305.go | 80 +-
 .../golang.org/x/crypto/poly1305/sum_amd64.go | 58 +-
 .../golang.org/x/crypto/poly1305/sum_amd64.s | 63 +-
 .../poly1305/{sum_ref.go => sum_generic.go} | 121 +-
 .../golang.org/x/crypto/poly1305/sum_noasm.go | 4 +-
 .../golang.org/x/crypto/poly1305/sum_s390x.go | 17 +-
 .../golang.org/x/crypto/poly1305/sum_s390x.s | 22 -
 .../x/crypto/poly1305/sum_vmsl_s390x.s | 22 -
 vendor/golang.org/x/net/html/atom/atom.go | 78 -
 vendor/golang.org/x/net/html/atom/gen.go | 712 -
 vendor/golang.org/x/net/html/atom/table.go | 783 -
 vendor/golang.org/x/net/html/const.go | 112 -
 vendor/golang.org/x/net/html/doc.go | 106 -
 vendor/golang.org/x/net/html/doctype.go | 156 -
 vendor/golang.org/x/net/html/entity.go | 2253 --
 vendor/golang.org/x/net/html/escape.go | 258 -
 vendor/golang.org/x/net/html/foreign.go | 226 -
 vendor/golang.org/x/net/html/node.go | 220 -
 vendor/golang.org/x/net/html/parse.go | 2311 --
 vendor/golang.org/x/net/html/render.go | 271 -
 vendor/golang.org/x/net/html/token.go | 1219 -
 vendor/golang.org/x/net/http2/frame.go | 2 +-
 vendor/golang.org/x/net/http2/hpack/hpack.go | 10 +-
 vendor/golang.org/x/net/http2/server.go | 6 -
 vendor/golang.org/x/net/http2/transport.go | 27 +-
 vendor/golang.org/x/net/trace/trace.go | 27 +-
 vendor/golang.org/x/net/trace/trace_go16.go | 21 -
 vendor/golang.org/x/net/trace/trace_go17.go | 21 -
 .../golang.org/x/sync/semaphore/semaphore.go | 12 +-
 vendor/golang.org/x/sys/cpu/byteorder.go | 30 +
 vendor/golang.org/x/sys/cpu/cpu.go | 89 +
 vendor/golang.org/x/sys/cpu/cpu_arm.go | 9 +
 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 16 +
 vendor/golang.org/x/sys/cpu/cpu_gccgo.c | 43 +
 vendor/golang.org/x/sys/cpu/cpu_gccgo.go | 26 +
 vendor/golang.org/x/sys/cpu/cpu_linux.go | 55 +
 .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 67 +
 .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 33 +
 vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 11 +
 vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 +
 .../golang.org/x/sys/cpu/cpu_other_arm64.go | 11 +
 .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 12 +
 vendor/golang.org/x/sys/cpu/cpu_s390x.go | 9 +
 vendor/golang.org/x/sys/cpu/cpu_x86.go | 57 +
 vendor/golang.org/x/sys/cpu/cpu_x86.s | 27 +
 vendor/golang.org/x/sys/unix/README.md | 16 +-
 vendor/golang.org/x/sys/unix/mkall.sh | 4 +-
 vendor/golang.org/x/sys/unix/mkerrors.sh | 12 +-
 vendor/golang.org/x/sys/unix/mksyscall.go | 4 +
 .../x/sys/unix/mksyscall_aix_ppc64.go | 602 +
 .../x/sys/unix/mksyscall_aix_ppc64.pl | 579 -
 .../x/sys/unix/mksyscall_solaris.go | 335 +
 .../x/sys/unix/mksyscall_solaris.pl | 294 -
 vendor/golang.org/x/sys/unix/syscall_aix.go | 2 +-
 .../golang.org/x/sys/unix/syscall_darwin.go | 1 +
 vendor/golang.org/x/sys/unix/syscall_linux.go | 27 +
 .../x/sys/unix/zerrors_linux_386.go | 28 +-
 .../x/sys/unix/zerrors_linux_amd64.go | 28 +-
 .../x/sys/unix/zerrors_linux_arm.go | 28 +-
 .../x/sys/unix/zerrors_linux_arm64.go | 28 +-
 .../x/sys/unix/zerrors_linux_mips.go | 28 +-
 .../x/sys/unix/zerrors_linux_mips64.go | 28 +-
 .../x/sys/unix/zerrors_linux_mips64le.go | 28 +-
 .../x/sys/unix/zerrors_linux_mipsle.go | 28 +-
 .../x/sys/unix/zerrors_linux_ppc64.go | 28 +-
 .../x/sys/unix/zerrors_linux_ppc64le.go | 28 +-
 .../x/sys/unix/zerrors_linux_riscv64.go | 28 +-
 .../x/sys/unix/zerrors_linux_s390x.go | 28 +-
 .../x/sys/unix/zerrors_linux_sparc64.go | 28 +-
 .../x/sys/unix/zsyscall_aix_ppc64.go | 2 +-
 .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 2 +-
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 2 +-
 .../x/sys/unix/zsyscall_darwin_amd64.go | 15 +
 .../x/sys/unix/zsyscall_darwin_amd64.s | 2 +
 .../x/sys/unix/zsyscall_solaris_amd64.go | 2 +-
 .../x/sys/unix/zsysnum_darwin_amd64.go | 6 +-
 .../x/sys/unix/zsysnum_freebsd_arm64.go | 2 +-
 .../x/sys/unix/zsysnum_netbsd_arm64.go | 2 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 65 +-
 .../x/sys/unix/ztypes_linux_amd64.go | 65 +-
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 65 +-
 .../x/sys/unix/ztypes_linux_arm64.go | 65 +-
 .../x/sys/unix/ztypes_linux_mips.go | 65 +-
 .../x/sys/unix/ztypes_linux_mips64.go | 65 +-
 .../x/sys/unix/ztypes_linux_mips64le.go | 65 +-
 .../x/sys/unix/ztypes_linux_mipsle.go | 65 +-
 .../x/sys/unix/ztypes_linux_ppc64.go | 65 +-
 .../x/sys/unix/ztypes_linux_ppc64le.go | 65 +-
 .../x/sys/unix/ztypes_linux_riscv64.go | 65 +-
 .../x/sys/unix/ztypes_linux_s390x.go | 65 +-
 .../x/sys/unix/ztypes_linux_sparc64.go | 65 +-
 vendor/golang.org/x/text/unicode/bidi/bidi.go | 2 +-
 .../golang.org/x/text/unicode/bidi/bracket.go | 2 +-
 vendor/golang.org/x/text/unicode/bidi/core.go | 2 +-
 vendor/golang.org/x/text/unicode/bidi/gen.go | 2 +-
 .../x/text/unicode/bidi/gen_ranges.go | 2 +-
 .../x/text/unicode/norm/composition.go | 4 +-
 .../x/text/unicode/norm/forminfo.go | 14 +
 .../x/text/unicode/norm/maketables.go | 20 +-
 .../x/text/unicode/norm/normalize.go | 4 +-
 .../x/text/unicode/norm/tables10.0.0.go | 1890 +-
 .../x/text/unicode/norm/tables9.0.0.go | 1890 +-
 .../api/support/bundler/bundler.go | 4 +-
 .../google.golang.org/appengine/.travis.yml | 20 +
 .../appengine/CONTRIBUTING.md | 90 +
 vendor/google.golang.org/appengine/LICENSE | 202 +
 vendor/google.golang.org/appengine/README.md | 73 +
 .../google.golang.org/appengine/appengine.go | 137 +
 .../appengine/appengine_vm.go | 20 +
 .../appengine/datastore/datastore.go | 407 +
 .../appengine/datastore/doc.go | 361 +
 .../appengine/datastore/key.go | 396 +
 .../appengine/datastore/load.go | 429 +
 .../appengine/datastore/metadata.go | 78 +
 .../appengine/datastore/prop.go | 330 +
 .../appengine/datastore/query.go | 757 +
 .../appengine/datastore/save.go | 333 +
 .../appengine/datastore/transaction.go | 96 +
 vendor/google.golang.org/appengine/errors.go | 46 +
 vendor/google.golang.org/appengine/go.mod | 7 +
 vendor/google.golang.org/appengine/go.sum | 6 +
 .../google.golang.org/appengine/identity.go | 142 +
 .../appengine/internal/api.go | 671 +
 .../appengine/internal/api_classic.go | 169 +
 .../appengine/internal/api_common.go | 123 +
 .../appengine/internal/app_id.go | 28 +
 .../app_identity/app_identity_service.pb.go | 611 +
 .../app_identity/app_identity_service.proto | 64 +
 .../appengine/internal/base/api_base.pb.go | 308 +
 .../appengine/internal/base/api_base.proto | 33 +
 .../internal/datastore/datastore_v3.pb.go | 4367 ++++
 .../internal/datastore/datastore_v3.proto | 551 +
 .../appengine/internal/identity.go | 55 +
 .../appengine/internal/identity_classic.go | 61 +
 .../appengine/internal/identity_flex.go | 11 +
 .../appengine/internal/identity_vm.go | 134 +
 .../appengine/internal/internal.go | 110 +
 .../appengine/internal/log/log_service.pb.go | 1313 +
 .../appengine/internal/log/log_service.proto | 150 +
 .../appengine/internal/main.go | 16 +
 .../appengine/internal/main_common.go | 7 +
 .../appengine/internal/main_vm.go | 69 +
 .../appengine/internal/metadata.go | 60 +
 .../internal/modules/modules_service.pb.go | 786 +
 .../internal/modules/modules_service.proto | 80 +
 .../appengine/internal/net.go | 56 +
 .../appengine/internal/regen.sh | 40 +
 .../internal/remote_api/remote_api.pb.go | 361 +
 .../internal/remote_api/remote_api.proto | 44 +
 .../appengine/internal/transaction.go | 115 +
 .../google.golang.org/appengine/namespace.go | 25 +
 vendor/google.golang.org/appengine/timeout.go | 20 +
 .../appengine/travis_install.sh | 18 +
 .../appengine/travis_test.sh | 12 +
 .../grpc/balancer/balancer.go | 13 +
 .../grpc/balancer/base/balancer.go | 47 +-
 .../grpc/balancer_conn_wrappers.go | 22 +
 .../grpc/balancer_v1_wrapper.go | 6 +-
 vendor/google.golang.org/grpc/clientconn.go | 469 +-
 vendor/google.golang.org/grpc/dialoptions.go | 24 +-
 .../google.golang.org/grpc/health/server.go | 40 +
 .../grpc/internal/channelz/funcs.go | 89 +-
 .../grpc/internal/envconfig/envconfig.go | 3 +-
 .../grpc/internal/internal.go | 13 +-
 .../grpc/internal/transport/http2_client.go | 15 +-
 .../grpc/internal/transport/transport.go | 62 +-
 .../grpc/keepalive/keepalive.go | 2 +-
 .../grpc/resolver_conn_wrapper.go | 2 +-
 vendor/google.golang.org/grpc/rpc_util.go | 27 +-
 vendor/google.golang.org/grpc/stream.go | 5 +-
 .../grpc/test/bufconn/bufconn.go | 244 +
 vendor/google.golang.org/grpc/version.go | 2 +-
 vendor/google.golang.org/grpc/vet.sh | 7 +-
 vendor/modules.txt | 168 +-
 1034 files changed, 121537 insertions(+), 39507 deletions(-)

 create mode 100644 vendor/github.com/agext/levenshtein/go.mod
 create mode 100644 vendor/github.com/agext/levenshtein/test.sh
 create mode 100644 vendor/github.com/armon/go-radix/go.mod
 rename vendor/github.com/aws/aws-sdk-go/aws/{context.go => context_1_5.go} (58%)
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
 delete mode 100644 vendor/github.com/go-ini/ini/.gitignore
 delete mode 100644 vendor/github.com/go-ini/ini/.travis.yml
 delete mode 100644 vendor/github.com/go-ini/ini/LICENSE
 delete mode 100644 vendor/github.com/go-ini/ini/Makefile
 delete mode 100644 vendor/github.com/go-ini/ini/README.md
 delete mode 100644 vendor/github.com/go-ini/ini/README_ZH.md
 delete mode 100644 vendor/github.com/go-ini/ini/error.go
 delete mode 100644 vendor/github.com/go-ini/ini/ini.go
 delete mode 100644 vendor/github.com/go-ini/ini/key.go
 delete mode 100644 vendor/github.com/go-ini/ini/parser.go
 delete mode 100644 vendor/github.com/go-ini/ini/section.go
 delete mode 100644 vendor/github.com/go-ini/ini/struct.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go
 create mode 100644 vendor/github.com/google/go-cmp/LICENSE
 create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/format.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/reporter.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
 create mode 100644 vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/go.mod
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/checksum.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/client_option.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/client_option_progress.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/common.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_git.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/detect_ssh.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_base.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/get_file_copy.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/go.mod
 create mode 100644 vendor/github.com/hashicorp/go-getter/go.sum
 create mode 100644 vendor/github.com/hashicorp/go-hclog/.gitignore
 create mode 100644 vendor/github.com/hashicorp/go-hclog/go.mod
 create mode 100644 vendor/github.com/hashicorp/go-hclog/go.sum
 create mode 100644 vendor/github.com/hashicorp/go-hclog/nulllogger.go
 create mode 100644 vendor/github.com/hashicorp/go-plugin/go.mod
 create mode 100644 vendor/github.com/hashicorp/go-plugin/go.sum
 create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_broker.go
 create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_controller.go
 create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go
 create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go
 create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto
 create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go
 create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto
 create mode 100644 vendor/github.com/hashicorp/go-plugin/mtls.go
 create mode 100644 vendor/github.com/hashicorp/go-safetemp/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-safetemp/README.md
 create mode 100644 vendor/github.com/hashicorp/go-safetemp/go.mod
 create mode 100644 vendor/github.com/hashicorp/go-safetemp/safetemp.go
 create mode 100644 vendor/github.com/hashicorp/go-uuid/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/go-uuid/go.mod
 create mode 100644 vendor/github.com/hashicorp/go-version/go.mod
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/gohcl/encode.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/expr_call.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcled/doc.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcled/navigation.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/ast.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/doc.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/format.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/generate.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/node.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/parser.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/public.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go
 create mode 100644 vendor/github.com/hashicorp/hil/go.mod
 create mode 100644 vendor/github.com/hashicorp/hil/go.sum
 create mode 100644 vendor/github.com/hashicorp/logutils/go.mod
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/LICENSE
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go
 create mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go
 create mode 100644 vendor/github.com/hashicorp/terraform/addrs/count_attr.go
 create mode 100644 vendor/github.com/hashicorp/terraform/addrs/doc.go
 create mode 100644
vendor/github.com/hashicorp/terraform/addrs/input_variable.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/instance_key.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/local_value.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/module.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/module_call.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/module_instance.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/output_value.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/parse_ref.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/parse_target.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/path_attr.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/provider_config.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/referenceable.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/resource.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/resource_phase.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/self.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/targetable.go create mode 100644 vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go create mode 100644 vendor/github.com/hashicorp/terraform/command/format/diagnostic.go create mode 100644 vendor/github.com/hashicorp/terraform/command/format/diff.go create mode 100644 vendor/github.com/hashicorp/terraform/command/format/format.go create mode 100644 vendor/github.com/hashicorp/terraform/command/format/object_id.go create mode 100644 vendor/github.com/hashicorp/terraform/command/format/plan.go create mode 100644 vendor/github.com/hashicorp/terraform/command/format/state.go delete mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go delete mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go delete mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go create mode 100644 vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go create mode 100644 vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go create mode 100644 vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/backend.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/compat_shim.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/config.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/config_build.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/getter.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/inode.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/loader.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go create mode 
100644 vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configload/testing.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go rename vendor/github.com/hashicorp/terraform/{config => configs}/configschema/doc.go (100%) create mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go rename vendor/github.com/hashicorp/terraform/{config => configs}/configschema/internal_validate.go (83%) create mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go rename vendor/github.com/hashicorp/terraform/{config => configs}/configschema/schema.go (75%) create mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/depends_on.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/module.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/module_call.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/module_merge.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/module_merge_body.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/named_values.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/parser.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/parser_config.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/parser_values.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/provider.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/provisioner.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/resource.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/synth_body.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/util.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go create mode 100644 vendor/github.com/hashicorp/terraform/configs/version_constraint.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/plugin/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go 
create mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/shims.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/getter.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/inode.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/testing.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh create mode 100644 vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go create mode 100644 vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto create mode 100644 vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/data.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/eval.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/collection.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/number.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/string.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/functions.go create mode 100644 vendor/github.com/hashicorp/terraform/lang/references.go create mode 100644 
vendor/github.com/hashicorp/terraform/lang/scope.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/action.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/action_string.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/changes.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/changes_src.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/changes_state.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/changes_sync.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/dynamic_value.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go create mode 100644 vendor/github.com/hashicorp/terraform/plans/plan.go create mode 100644 vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go create mode 100644 vendor/github.com/hashicorp/terraform/plugin/convert/schema.go create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go create mode 100644 vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go create mode 100644 vendor/github.com/hashicorp/terraform/providers/addressed_types.go create mode 100644 vendor/github.com/hashicorp/terraform/providers/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/providers/provider.go create mode 100644 vendor/github.com/hashicorp/terraform/providers/resolver.go create mode 100644 vendor/github.com/hashicorp/terraform/provisioners/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/provisioners/factory.go create mode 100644 vendor/github.com/hashicorp/terraform/provisioners/provisioner.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/provider.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/provider_list.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/states/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/states/eachmode_string.go create mode 100644 vendor/github.com/hashicorp/terraform/states/instance_generation.go create mode 100644 vendor/github.com/hashicorp/terraform/states/instance_object.go create mode 100644 vendor/github.com/hashicorp/terraform/states/instance_object_src.go create mode 100644 vendor/github.com/hashicorp/terraform/states/module.go create mode 100644 vendor/github.com/hashicorp/terraform/states/objectstatus_string.go create mode 100644 vendor/github.com/hashicorp/terraform/states/output_value.go create mode 100644 vendor/github.com/hashicorp/terraform/states/resource.go create mode 100644 vendor/github.com/hashicorp/terraform/states/state.go create mode 100644 
vendor/github.com/hashicorp/terraform/states/state_deepcopy.go create mode 100644 vendor/github.com/hashicorp/terraform/states/state_equal.go create mode 100644 vendor/github.com/hashicorp/terraform/states/state_string.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/file.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/read.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version0.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version1.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version2.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version3.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version4.go create mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/write.go create mode 100644 vendor/github.com/hashicorp/terraform/states/sync.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_input.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/debug.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_lang.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_resource.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/evaluate.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/provider_mock.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/semantics.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_add.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/contextual.go create mode 100644 
vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go create mode 100644 vendor/github.com/mattn/go-colorable/go.mod create mode 100644 vendor/github.com/mattn/go-colorable/go.sum create mode 100644 vendor/github.com/mattn/go-isatty/go.mod create mode 100644 vendor/github.com/mattn/go-isatty/go.sum delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_appengine.go delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go create mode 100644 vendor/github.com/mitchellh/colorstring/.travis.yml create mode 100644 vendor/github.com/mitchellh/colorstring/LICENSE create mode 100644 vendor/github.com/mitchellh/colorstring/README.md create mode 100644 vendor/github.com/mitchellh/colorstring/colorstring.go create mode 100644 vendor/github.com/mitchellh/colorstring/go.mod create mode 100644 vendor/github.com/oklog/run/.gitignore create mode 100644 vendor/github.com/oklog/run/.travis.yml create mode 100644 vendor/github.com/oklog/run/LICENSE create mode 100644 vendor/github.com/oklog/run/README.md create mode 100644 vendor/github.com/oklog/run/group.go create mode 100644 vendor/github.com/posener/complete/go.mod create mode 100644 vendor/github.com/posener/complete/go.sum delete mode 100644 vendor/github.com/posener/complete/metalinter.json create mode 100644 vendor/github.com/spf13/afero/.travis.yml create mode 100644 vendor/github.com/spf13/afero/LICENSE.txt create mode 100644 vendor/github.com/spf13/afero/README.md create mode 100644 vendor/github.com/spf13/afero/afero.go create mode 100644 vendor/github.com/spf13/afero/appveyor.yml create mode 100644 vendor/github.com/spf13/afero/basepath.go create mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go create mode 100644 vendor/github.com/spf13/afero/const_bsds.go create mode 100644 vendor/github.com/spf13/afero/const_win_unix.go create mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go create mode 100644 vendor/github.com/spf13/afero/go.mod create mode 100644 vendor/github.com/spf13/afero/go.sum create mode 100644 vendor/github.com/spf13/afero/httpFs.go create mode 100644 vendor/github.com/spf13/afero/ioutil.go create mode 100644 vendor/github.com/spf13/afero/lstater.go create mode 100644 vendor/github.com/spf13/afero/match.go create mode 100644 vendor/github.com/spf13/afero/mem/dir.go create mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go create mode 100644 vendor/github.com/spf13/afero/mem/file.go create mode 100644 vendor/github.com/spf13/afero/memmap.go create mode 100644 vendor/github.com/spf13/afero/os.go create mode 100644 vendor/github.com/spf13/afero/path.go create mode 100644 vendor/github.com/spf13/afero/readonlyfs.go create mode 100644 vendor/github.com/spf13/afero/regexpfs.go create mode 100644 vendor/github.com/spf13/afero/unionFile.go create mode 100644 vendor/github.com/spf13/afero/util.go create mode 100644 vendor/github.com/vmihailenco/msgpack/.travis.yml create mode 100644 vendor/github.com/vmihailenco/msgpack/CHANGELOG.md create mode 100644 vendor/github.com/vmihailenco/msgpack/LICENSE create mode 100644 vendor/github.com/vmihailenco/msgpack/Makefile create mode 100644 vendor/github.com/vmihailenco/msgpack/README.md create mode 100644 vendor/github.com/vmihailenco/msgpack/appengine.go create mode 100644 vendor/github.com/vmihailenco/msgpack/codes/codes.go create mode 100644 vendor/github.com/vmihailenco/msgpack/decode.go create mode 100644 vendor/github.com/vmihailenco/msgpack/decode_map.go 
create mode 100644 vendor/github.com/vmihailenco/msgpack/decode_number.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/decode_query.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/decode_slice.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/decode_string.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/decode_value.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/encode.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/encode_map.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/encode_number.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/encode_slice.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/encode_value.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/ext.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/msgpack.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/tag.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/time.go
create mode 100644 vendor/github.com/vmihailenco/msgpack/types.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/path_set.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/walk.go
create mode 100644 vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s
create mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go
create mode 100644 vendor/golang.org/x/crypto/poly1305/mac_noasm.go
rename vendor/golang.org/x/crypto/poly1305/{sum_ref.go => sum_generic.go} (54%)
delete mode 100644 vendor/golang.org/x/net/html/atom/atom.go
delete mode 100644 vendor/golang.org/x/net/html/atom/gen.go
delete mode 100644 vendor/golang.org/x/net/html/atom/table.go
delete mode 100644 vendor/golang.org/x/net/html/const.go
delete mode 100644 vendor/golang.org/x/net/html/doc.go
delete mode 100644 vendor/golang.org/x/net/html/doctype.go
delete mode 100644 vendor/golang.org/x/net/html/entity.go
delete mode 100644 vendor/golang.org/x/net/html/escape.go
delete mode 100644 vendor/golang.org/x/net/html/foreign.go
delete mode 100644 vendor/golang.org/x/net/html/node.go
delete mode 100644 vendor/golang.org/x/net/html/parse.go
delete mode 100644 vendor/golang.org/x/net/html/render.go
delete mode 100644 vendor/golang.org/x/net/html/token.go
delete mode 100644 vendor/golang.org/x/net/trace/trace_go16.go
delete mode 100644 vendor/golang.org/x/net/trace/trace_go17.go
create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo.c
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s
create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.pl
create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go
delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.pl
create mode 100644 vendor/google.golang.org/appengine/.travis.yml
create mode 100644 vendor/google.golang.org/appengine/CONTRIBUTING.md
create mode 100644 vendor/google.golang.org/appengine/LICENSE
create mode 100644 vendor/google.golang.org/appengine/README.md
create mode 100644 vendor/google.golang.org/appengine/appengine.go
create mode 100644 vendor/google.golang.org/appengine/appengine_vm.go
create mode 100644 vendor/google.golang.org/appengine/datastore/datastore.go
create mode 100644 vendor/google.golang.org/appengine/datastore/doc.go
create mode 100644 vendor/google.golang.org/appengine/datastore/key.go
create mode 100644 vendor/google.golang.org/appengine/datastore/load.go
create mode 100644 vendor/google.golang.org/appengine/datastore/metadata.go
create mode 100644 vendor/google.golang.org/appengine/datastore/prop.go
create mode 100644 vendor/google.golang.org/appengine/datastore/query.go
create mode 100644 vendor/google.golang.org/appengine/datastore/save.go
create mode 100644 vendor/google.golang.org/appengine/datastore/transaction.go
create mode 100644 vendor/google.golang.org/appengine/errors.go
create mode 100644 vendor/google.golang.org/appengine/go.mod
create mode 100644 vendor/google.golang.org/appengine/go.sum
create mode 100644 vendor/google.golang.org/appengine/identity.go
create mode 100644 vendor/google.golang.org/appengine/internal/api.go
create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go
create mode 100644 vendor/google.golang.org/appengine/internal/api_common.go
create mode 100644 vendor/google.golang.org/appengine/internal/app_id.go
create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto
create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
create mode 100644 vendor/google.golang.org/appengine/internal/identity.go
create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go
create mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go
create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go
create mode 100644 vendor/google.golang.org/appengine/internal/internal.go
create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto
create mode 100644 vendor/google.golang.org/appengine/internal/main.go
create mode 100644 vendor/google.golang.org/appengine/internal/main_common.go
create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go
create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go
create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto
create mode 100644 vendor/google.golang.org/appengine/internal/net.go
create mode 100644 vendor/google.golang.org/appengine/internal/regen.sh
create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go
create mode 100644 vendor/google.golang.org/appengine/namespace.go
create mode 100644 vendor/google.golang.org/appengine/timeout.go
create mode 100644 vendor/google.golang.org/appengine/travis_install.sh
create mode 100644 vendor/google.golang.org/appengine/travis_test.sh
create mode 100644 vendor/google.golang.org/grpc/test/bufconn/bufconn.go

diff --git a/go.mod b/go.mod
index 262f86ae1a..27eeb5470e 100644
--- a/go.mod
+++ b/go.mod
@@ -4,40 +4,9 @@ require (
 	contrib.go.opencensus.io/exporter/ocagent v0.4.2 // indirect
 	github.com/Azure/azure-sdk-for-go v24.1.0+incompatible
 	github.com/Azure/go-autorest v11.2.8+incompatible
-	github.com/agext/levenshtein v1.2.1 // indirect
-	github.com/apparentlymart/go-cidr v0.0.0-20170418151526-7e4b007599d4 // indirect
-	github.com/apparentlymart/go-textseg v0.0.0-20170531203952-b836f5c4d331 // indirect
-	github.com/aws/aws-sdk-go v1.8.34 // indirect
-	github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
-	github.com/blang/semver v3.5.1+incompatible // indirect
-	github.com/davecgh/go-spew v1.1.0 // indirect
-	github.com/fsouza/go-dockerclient v0.0.0-20160427172547-1d4f4ae73768 // indirect
-	github.com/go-ini/ini v1.23.1 // indirect
 	github.com/gofrs/uuid v3.2.0+incompatible
 	github.com/google/uuid v0.0.0-20170814143639-7e072fc3a7be
-	github.com/hashicorp/go-azure-helpers v0.0.0-20181211121309-38db96513363
-	github.com/hashicorp/go-cleanhttp v0.0.0-20170211013415-3573b8b52aa7 // indirect
-	github.com/hashicorp/go-getter v0.0.0-20180226183729-64040d90d4ab // indirect
-	github.com/hashicorp/go-hclog v0.0.0-20170903163258-8105cc0a3736 // indirect
-	github.com/hashicorp/go-plugin v0.0.0-20170816151819-a5174f84d7f8 // indirect
-	github.com/hashicorp/go-uuid v0.0.0-20160120003506-36289988d83c
-	github.com/hashicorp/go-version v0.0.0-20161031182605-e96d38404026 // indirect
-	github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f // indirect
-	github.com/hashicorp/hcl2 v0.0.0-20180227155456-998a3053e207 // indirect
-	github.com/hashicorp/hil v0.0.0-20170512213305-fac2259da677 // indirect
-	github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3 // indirect
-	github.com/hashicorp/terraform v0.11.9
-	github.com/hashicorp/yamux v0.0.0-20160720233140-d1caa6c97c9f // indirect
-	github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect
-	github.com/mitchellh/cli v1.0.0 // indirect
-	github.com/mitchellh/copystructure v1.0.0 // indirect
-	github.com/mitchellh/go-testing-interface v1.0.0 // indirect
-	github.com/mitchellh/go-wordwrap v1.0.0 // indirect
-	github.com/mitchellh/hashstructure v1.0.0 // indirect
-	github.com/mitchellh/mapstructure v1.1.2 // indirect
-	github.com/ulikunitz/xz v0.5.4 // indirect
-	github.com/zclconf/go-cty v0.0.0-20180227163247-7166230c635f // indirect
-	golang.org/x/sys v0.0.0-20190121090251-770c60269bf0 // indirect
-	google.golang.org/genproto v0.0.0-20190111180523-db91494dd46c // indirect
-	google.golang.org/grpc v1.17.0 // indirect
+	github.com/hashicorp/go-azure-helpers v0.0.0-20190129193224-166dfd221bb2
+	github.com/hashicorp/go-uuid v1.0.1
+	github.com/hashicorp/terraform v0.12.0-alpha4.0.20190417210818-177a7afb781f
 )
diff --git a/go.sum b/go.sum
index afd943db7f..8ddc5ca637 100644
--- a/go.sum
+++ b/go.sum
@@ -1,6 +1,14 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.36.0 h1:+aCSj7tOo2LODWVEuZDZeGCckdt6MlSF+X/rB3wUiS8=
+cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40=
 contrib.go.opencensus.io/exporter/ocagent v0.4.2 h1:EjvhWhqxJpIUEBcTJoUDUyScfZ/30ehPEvDmvj9v4DA=
 contrib.go.opencensus.io/exporter/ocagent v0.4.2/go.mod h1:YuG83h+XWwqWjvCqn7vK4KSyLKhThY3+gNGQ37iS2V0=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
 git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
 github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v24.1.0+incompatible h1:P7GocB7bhkyGbRL1tCy0m9FDqb1V/dqssch3jZieUHk=
@@ -8,17 +16,39 @@ github.com/Azure/azure-sdk-for-go v24.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo
 github.com/Azure/go-autorest v10.15.4+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v11.2.8+incompatible h1:Q2feRPMlcfVcqz3pF87PJzkm5lZrL+x6BDtzhODzNJM=
 github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 h1:pSm8mp0T2OH2CPmPDPtwHPr3VAQaOwVF/JbllOPP4xA=
+github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022 h1:y8Gs8CzNfDF5AZvjr+5UyGQvQEBL7pwo+v+wX6q9JI8=
+github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
+github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292 h1:tuQ7w+my8a8mkwN7x2TSd7OzTjkZ7rAeSyH4xncuAMI=
+github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292/go.mod h1:KYCjqMOeHpNuTOiFQU6WEcTG7poCJrUs0YgyHNtn1no=
+github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
 github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
 github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
-github.com/apparentlymart/go-cidr v0.0.0-20170418151526-7e4b007599d4 h1:bpmA3CCh0K829XIR5kfcV+YDt+Gwi7SEYPCcYEVKWUo=
-github.com/apparentlymart/go-cidr v0.0.0-20170418151526-7e4b007599d4/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
-github.com/apparentlymart/go-textseg v0.0.0-20170531203952-b836f5c4d331 h1:AIKxo1t7QE7MAqADwrmzMiaFC+QfHfXOk8lrmibN5Lk=
-github.com/apparentlymart/go-textseg v0.0.0-20170531203952-b836f5c4d331/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
-github.com/armon/go-radix v0.0.0-20170727155443-1fca145dffbc/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=
+github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
+github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
+github.com/apparentlymart/go-cidr v1.0.0 h1:lGDvXx8Lv9QHjrAVP7jyzleG4F9+FkRhJcEsDFxeb8w=
+github.com/apparentlymart/go-cidr v1.0.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
+github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
+github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I=
+github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
+github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=
+github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
+github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs=
+github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aws/aws-sdk-go v1.8.34 h1:zDNBtR25rYYUi7aEh2HRa2f2WTm6LSHWSAMCNY2WdXA=
-github.com/aws/aws-sdk-go v1.8.34/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
+github.com/aws/aws-sdk-go v1.16.36 h1:POeH34ZME++pr7GBGh+ZO6Y5kOwSMQpqp5BGUgooJ6k=
+github.com/aws/aws-sdk-go v1.16.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
@@ -26,145 +56,423 @@ github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQ
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k=
 github.com/census-instrumentation/opencensus-proto v0.1.0-0.20181214143942-ba49f56771b8 h1:gUqsFVdUKoRHNg8fkFd8gB5OOEa/g5EwlAHznb4zjbI=
 github.com/census-instrumentation/opencensus-proto v0.1.0-0.20181214143942-ba49f56771b8/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d h1:aG5FcWiZTOhPQzYIxwxSR1zEOxzL32fwr1CsaCfhO6w=
+github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v0.0.0-20160617170158-f0777076321a/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dimchansky/utfbom v0.0.0-20170328061312-6c6132ff69f0/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
 github.com/dimchansky/utfbom v1.0.0 h1:fGC2kkf4qOoKqZ4q7iIh+Vef4ubC1c38UDsEyZynZPc=
 github.com/dimchansky/utfbom v1.0.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
-github.com/fatih/color v0.0.0-20181010231311-3f9d52f7176a/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/dnaeon/go-vcr v0.0.0-20180920040454-5637cf3d8a31/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dylanmei/iso8601 v0.1.0 h1:812NGQDBcqquTfH5Yeo7lwR0nzx/cKdsmf3qMjPURUI=
+github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ=
+github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y=
 github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fsouza/go-dockerclient v0.0.0-20160427172547-1d4f4ae73768/go.mod h1:KpcjM623fQYE9MZiTGzKhjfxXAV9wbyX2C1cyRHfhl0=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-ini/ini v1.23.1 h1:amNPHl+tCb4BolL2NAIQaKLY+ZiL1Ju7OqZ9Fx6PTBQ=
-github.com/go-ini/ini v1.23.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg=
+github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
 github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
 github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v0.0.0-20190109072247-347cf4a86c1c/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
+github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk=
+github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/uuid v0.0.0-20170814143639-7e072fc3a7be h1:JX31ns0WPRsUGmZXMlMoJta76MW+0UM7+JnCmqxDUVs=
 github.com/google/uuid v0.0.0-20170814143639-7e072fc3a7be/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3 h1:siORttZ36U2R/WjiJuDz8znElWBiAlO9rVt+mqJt0Cc=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968 h1:Pu+HW4kcQozw0QyrTTgLE+3RXNqFhQNNzhbnoLFL83c=
+github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
+github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01 h1:OgCNGSnEalfkRpn//WGJHhpo7fkP+LhTpvEITZ7CkK4=
+github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI=
 github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.5.1 h1:3scN4iuXkNOyP98jF55Lv8a9j1o/IwvnDIZ0LHJK1nk=
+github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/hashicorp/aws-sdk-go-base v0.2.0 h1:5bjZnWCvQg9Im5CHZr9t90IaFC4uvVlMl2fTh23IoCk=
+github.com/hashicorp/aws-sdk-go-base v0.2.0/go.mod h1:ZIWACGGi0N7a4DZbf15yuE1JQORmWLtBcVM6F5SXNFU=
+github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089 h1:1eDpXAxTh0iPv+1kc9/gfSI2pxRERDsTk/lNGolwHn8=
+github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
 github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-azure-helpers v0.0.0-20181211121309-38db96513363 h1:9d0jVlMpgfMTdL/QSsRDQsLenOQZIHp5cEBiWSYc8w4=
-github.com/hashicorp/go-azure-helpers v0.0.0-20181211121309-38db96513363/go.mod h1:Y5ejHZY3jQby82dOASJzyQ2xZw37zs+D5x6AaOC6O5E=
-github.com/hashicorp/go-cleanhttp v0.0.0-20170211013415-3573b8b52aa7 h1:67fHcS+inUoiIqWCKIqeDuq2AlPHNHPiTqp97LdQ+bc=
-github.com/hashicorp/go-cleanhttp v0.0.0-20170211013415-3573b8b52aa7/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-getter v0.0.0-20180226183729-64040d90d4ab h1:ap3umB+Fouf/i1/N1q0AZLjOxSg1s8RIQ159wc7wNPE=
-github.com/hashicorp/go-getter v0.0.0-20180226183729-64040d90d4ab/go.mod h1:6rdJFnhkXnzGOJbvkrdv4t9nLwKcVA+tmbQeUlkIzrU=
-github.com/hashicorp/go-hclog v0.0.0-20170903163258-8105cc0a3736 h1:o62AOlCZLFgztwH3h06WnKRL9OrxVEn+fqPDQC26f68=
-github.com/hashicorp/go-hclog v0.0.0-20170903163258-8105cc0a3736/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
-github.com/hashicorp/go-multierror v0.0.0-20150916205742-d30f09973e19/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-azure-helpers v0.0.0-20190129193224-166dfd221bb2 h1:VBRx+yPYUZaobnn5ANBcOUf4hhWpTHSQgftG4TcDkhI=
+github.com/hashicorp/go-azure-helpers v0.0.0-20190129193224-166dfd221bb2/go.mod h1:lu62V//auUow6k0IykxLK2DCNW8qTmpm8KqhYVWattA=
+github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=
+github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg=
+github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-getter v1.1.0 h1:iGVeg7L4V5FTFV3D6w+1NAyvth7BIWWSzD60pWloe2Q=
+github.com/hashicorp/go-getter v1.1.0/go.mod h1:q+PoBhh16brIKwJS9kt18jEtXHTg2EGkmrA9P7HVS+U=
+github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
+github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f h1:Yv9YzBlAETjy6AOX9eLBZ3nshNVRREgerT/3nvxlGho=
+github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw=
+github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
 github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-plugin v0.0.0-20170816151819-a5174f84d7f8 h1:1M/vS848XLVjNyE7waQYdRErztf5MU33jo5IPBfzLag=
-github.com/hashicorp/go-plugin v0.0.0-20170816151819-a5174f84d7f8/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ=
-github.com/hashicorp/go-uuid v0.0.0-20160120003506-36289988d83c h1:74JBd5YKJzNH8YVYfXI2LVmLdRj2udNzzysg9Y0x2dw=
-github.com/hashicorp/go-uuid v0.0.0-20160120003506-36289988d83c/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v0.0.0-20161031182605-e96d38404026 h1:qWx/DcC6l4ZzuS+JBAzI5XjtLFDCc08zYeZ0kLnaH2g=
-github.com/hashicorp/go-version v0.0.0-20161031182605-e96d38404026/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-plugin v0.0.0-20190322172744-52e1c4730856 h1:FHiCaU46W1WoqApsaGGIKbNkhQ6v71hJrOf2INQMLUo=
+github.com/hashicorp/go-plugin v0.0.0-20190322172744-52e1c4730856/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
+github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4=
+github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
+github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
+github.com/hashicorp/go-slug v0.3.0 h1:L0c+AvH/J64iMNF4VqRaRku2DMTEuHioPVS7kMjWIU8=
+github.com/hashicorp/go-slug v0.3.0/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8=
+github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-tfe v0.3.14 h1:1eWmq4RAICGufydNUWu7ahb0gtq24pN9jatD2FkdxdE=
+github.com/hashicorp/go-tfe v0.3.14/go.mod h1:SuPHR+OcxvzBZNye7nGPfwZTEyd3rWPfLVbCgyZPezM=
+github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
+github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws=
 github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
-github.com/hashicorp/hcl2 v0.0.0-20180227155456-998a3053e207 h1:HUmEuWAM2YvL4fQyjw6K/l4HEBMqSHNNDTxKC1uLdKc=
-github.com/hashicorp/hcl2 v0.0.0-20180227155456-998a3053e207/go.mod h1:xp1eMAxqhQKBxz+yQUTsig9bBMRRWRWw+rK3FJmHf/A=
-github.com/hashicorp/hil v0.0.0-20170512213305-fac2259da677 h1:Yj0RcrbLT/5z2k1UdRW+2r0nDUgrKjUNLQiXzMU4a5k=
-github.com/hashicorp/hil v0.0.0-20170512213305-fac2259da677/go.mod h1:KHvg/R2/dPtaePb16oW4qIyzkMxXOL38xjRN64adsts=
-github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3 h1:oD64EFjELI9RY9yoWlfua58r+etdnoIC871z+rr6lkA=
-github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/terraform v0.11.9 h1:dL0cB5xsIg/rjySx1TVEnVg/FWdB03+jcKJ0O48FqPI=
-github.com/hashicorp/terraform v0.11.9/go.mod h1:uN1KUiT7Wdg61fPwsGXQwK3c8PmpIVZrt5Vcb1VrSoM=
-github.com/hashicorp/yamux v0.0.0-20160720233140-d1caa6c97c9f h1:K4RDeor/qhbs5ETM85SN8xekXkk+KkOBclNXXM8+UR0=
-github.com/hashicorp/yamux v0.0.0-20160720233140-d1caa6c97c9f/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/hashicorp/hcl2 v0.0.0-20181208003705-670926858200/go.mod h1:ShfpTh661oAaxo7VcNxg0zcZW6jvMa7Moy2oFx7e5dE=
+github.com/hashicorp/hcl2 v0.0.0-20190416162332-2c5a4b7d729a h1:doKt9ZBCYgYQrGK6CqJsEB+8xqm3WoFyKu4TPZlyymg=
+github.com/hashicorp/hcl2 v0.0.0-20190416162332-2c5a4b7d729a/go.mod h1:HtEzazM5AZ9fviNEof8QZB4T1Vz9UhHrGhnMPzl//Ek=
+github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590 h1:2yzhWGdgQUWZUCNK+AoO35V+HTsgEmcM4J9IkArh7PI=
+github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE=
+github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE=
+github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb h1:ZbgmOQt8DOg796figP87/EFCVx2v2h9yRvwHF/zceX4=
+github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE=
+github.com/hashicorp/terraform v0.12.0-alpha4.0.20190417210818-177a7afb781f h1:Gl7Ig3E2XpntOCaGMZrMR9zSfNJl4JXHiqP64zJCzak=
+github.com/hashicorp/terraform v0.12.0-alpha4.0.20190417210818-177a7afb781f/go.mod h1:A3NsI7WT87OMgpcD15cu6dK2YNpihchZp5fxUf8EHBg=
+github.com/hashicorp/terraform-config-inspect v0.0.0-20190327195015-8022a2663a70 h1:oZm5nE11yhzsTRz/YrUyDMSvixePqjoZihwn8ipuOYI=
+github.com/hashicorp/terraform-config-inspect v0.0.0-20190327195015-8022a2663a70/go.mod h1:ItvqtvbC3K23FFET62ZwnkwtpbKZm8t8eMcWjmVVjD8=
+github.com/hashicorp/vault v0.10.4/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926 h1:kie3qOosvRKqwij2HGzXWffwpXvcqfPPXRUw8I4F/mg=
+github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA=
+github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/mattn/go-colorable v0.0.0-20180310133214-efa589957cd0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82 h1:wnfcqULT+N2seWf6y4yHzmi7GD2kNx4Ute0qArktD48=
+github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84=
+github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9 h1:SmVbOZFWAlyQshuMfOkiAx1f5oUTsOGG5IXplAEYeeM=
+github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
+github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b h1:/1RFh2SLCJ+tEnT73+Fh5R2AO89sQqs8ba7o+hx1G0Y=
+github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b/go.mod h1:wr1VqkwW0AB5JS0QLy5GpVMS9E3VtRoSYXUYyVk46KY=
 github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.0-20170925054904-a5cdd64afdee/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
 github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.4 h1:xmZZyxuP+bYKAKkA9ABYXVNJ+G/Wf3R8d8vAP3LDJJk=
+github.com/mattn/go-shellwords v1.0.4/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb h1:GRiLv4rgyqjqzxbhJke65IYUf4NCOOvrPOJbV/sPxkM= +github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb/go.mod h1:OaY7UOoTkkrX3wRwjpYRKafIkkyeD0UtweSHAWWiqQM= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4 h1:jw9tsdJ1FQmUkyTXdIF/nByTX+mMnnp16glnvGZMsC4= +github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4/go.mod h1:YYMf4xtQnR8LRC0vKi3afvQ5QwRPQ17zjcpkBCufb+I= +github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51 h1:eD92Am0Qf3rqhsOeA1zwBHSfRkoHrt4o6uORamdmJP8= +github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/posener/complete v0.0.0-20170908125245-88e59760adad/go.mod 
h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58 h1:m3CEgv3ah1Rhy82L+c0QG/U3VyY1UsvsIdkh0/rU97Y= +github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 h1:chPfVn+gpAM5CTpTyVU9j8J+xgRGwmoDlNDLjKnJiYo= +github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= +github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/ulikunitz/xz v0.5.4 h1:zATC2OoZ8H1TZll3FpbX+ikwmadbO699PE06cIkm9oU= -github.com/ulikunitz/xz v0.5.4/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/zclconf/go-cty v0.0.0-20180227163247-7166230c635f h1:OkKoSRyYPHTuUJcbnjUPsuW+qzkxkqQxd8zJjZcsTc0= -github.com/zclconf/go-cty v0.0.0-20180227163247-7166230c635f/go.mod h1:LnDKxj8gN4aatfXUqmUNooaDjvmDcLPbAN3hYBIVoJE= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= 
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M= +github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/terraform-providers/terraform-provider-openstack v1.15.0 h1:adpjqej+F8BAX9dHmuPF47sUIkgifeqBu6p7iCsyj0Y= +github.com/terraform-providers/terraform-provider-openstack v1.15.0/go.mod 
h1:2aQ6n/BtChAl1y2S60vebhyJyZXBsuAI5G4+lHrT1Ew= +github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5 h1:cMjKdf4PxEBN9K5HaD9UMW8gkTbM0kMzkTa9SJe0WNQ= +github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= +github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.1+incompatible h1:RMF1enSPeKTlXrXdOcqjFUElywVZjjC6pqse21bKbEU= +github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557 h1:Jpn2j6wHkC9wJv5iMfJhKqrZJx3TahFx+7sbZ7zQdxs= +github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/zclconf/go-cty v0.0.0-20181129180422-88fbe721e0f8/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v0.0.0-20190124225737-a385d646c1e9/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v0.0.0-20190320224746-fd76348b9329 h1:ne520NlvoncW5zfBGkmP4EJhyd6ruSaSyhzobv0Vz9w= +github.com/zclconf/go-cty v0.0.0-20190320224746-fd76348b9329/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.18.1-0.20181204023538-aab39bd6a98b h1:6ayHMBPtdP3jNuk+Sfhso+PTB7ZJQ5E1FBo403m2H8w= go.opencensus.io v0.18.1-0.20181204023538-aab39bd6a98b/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20180816225734-aabede6cba87/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869 h1:kkXA53yGe04D0adEYJwEVQjeBppL01Exg+fnMjfUraU= golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f h1:qWFY9ZxP3tfI37wYIs/MnIAqK0vlXp1xnYEa5HxFSSY= +golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/net v0.0.0-20170927055102-0a9397675ba3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953 h1:LuZIitY8waaxUfNIdtajyE/YzA/zyf0YxXG27VpLrkg= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181129055619-fae4c4e3ad76/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9 h1:pfyU+l9dEu0vZzDDMsdAKa1gZbJYEn6urYXj/+Xkz7s= +golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190121090251-770c60269bf0 h1:kTCXMFd85FwiS5g4PBFOimFnVyc7jqgrMI8TZIq/W/Y= -golang.org/x/sys v0.0.0-20190121090251-770c60269bf0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190111180523-db91494dd46c h1:LZllHYjdJnynBfmwysp+s4yhMzfc+3BzhdqzAMvwjoc= -google.golang.org/genproto v0.0.0-20190111180523-db91494dd46c/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 h1:mBVYJnbrXLA/ZCBTCe7PtEgAUP+1bg92qTaFoPHdz+8= +google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= +google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/vmihailenco/msgpack.v2 v2.9.1/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +labix.org/v2/mgo v0.0.0-20140701140051-000000000287/go.mod h1:Lg7AYkt1uXJoR9oeSZ3W/8IXLdvOfIITgZnommstyz4= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/github.com/agext/levenshtein/.gitignore b/vendor/github.com/agext/levenshtein/.gitignore index 404365f638..4473da19b2 100644 --- a/vendor/github.com/agext/levenshtein/.gitignore +++ b/vendor/github.com/agext/levenshtein/.gitignore @@ -1,2 +1,53 @@ +# Ignore docs files +_gh_pages +_site + +# Ignore temporary files README.html 
coverage.out +.tmp + +# Numerous always-ignore extensions +*.diff +*.err +*.log +*.orig +*.rej +*.swo +*.swp +*.vi +*.zip +*~ + +# OS or Editor folders +._* +.cache +.DS_Store +.idea +.project +.settings +.tmproj +*.esproj +*.sublime-project +*.sublime-workspace +nbproject +Thumbs.db + +# Komodo +.komodotools +*.komodoproject + +# SCSS-Lint +scss-lint-report.xml + +# grunt-contrib-sass cache +.sass-cache + +# Jekyll metadata +docs/.jekyll-metadata + +# Folders to ignore +.build +.test +bower_components +node_modules diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml index 95be94af9f..a51a144660 100644 --- a/vendor/github.com/agext/levenshtein/.travis.yml +++ b/vendor/github.com/agext/levenshtein/.travis.yml @@ -1,70 +1,25 @@ language: go sudo: false -go: - - 1.8 - - 1.7.5 - - 1.7.4 - - 1.7.3 - - 1.7.2 - - 1.7.1 - - 1.7 - - tip - - 1.6.4 - - 1.6.3 - - 1.6.2 - - 1.6.1 - - 1.6 - - 1.5.4 - - 1.5.3 - - 1.5.2 - - 1.5.1 - - 1.5 - - 1.4.3 - - 1.4.2 - - 1.4.1 - - 1.4 - - 1.3.3 - - 1.3.2 - - 1.3.1 - - 1.3 - - 1.2.2 - - 1.2.1 - - 1.2 - - 1.1.2 - - 1.1.1 - - 1.1 -before_install: - - go get github.com/mattn/goveralls -script: - - $HOME/gopath/bin/goveralls -service=travis-ci -notifications: - email: - on_success: never matrix: fast_finish: true + include: + - go: 1.11.x + env: TEST_METHOD=goveralls + - go: 1.10.x + - go: tip + - go: 1.9.x + - go: 1.8.x + - go: 1.7.x + - go: 1.6.x + - go: 1.5.x allow_failures: - go: tip - - go: 1.6.4 - - go: 1.6.3 - - go: 1.6.2 - - go: 1.6.1 - - go: 1.6 - - go: 1.5.4 - - go: 1.5.3 - - go: 1.5.2 - - go: 1.5.1 - - go: 1.5 - - go: 1.4.3 - - go: 1.4.2 - - go: 1.4.1 - - go: 1.4 - - go: 1.3.3 - - go: 1.3.2 - - go: 1.3.1 - - go: 1.3 - - go: 1.2.2 - - go: 1.2.1 - - go: 1.2 - - go: 1.1.2 - - go: 1.1.1 - - go: 1.1 + - go: 1.9.x + - go: 1.8.x + - go: 1.7.x + - go: 1.6.x + - go: 1.5.x +script: ./test.sh $TEST_METHOD +notifications: + email: + on_success: never diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md index 90509c2a20..9e4255879f 100644 --- a/vendor/github.com/agext/levenshtein/README.md +++ b/vendor/github.com/agext/levenshtein/README.md @@ -11,7 +11,7 @@ This package implements distance and similarity metrics for strings, based on th ## Project Status -v1.2.1 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. +v1.2.2 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. diff --git a/vendor/github.com/agext/levenshtein/go.mod b/vendor/github.com/agext/levenshtein/go.mod new file mode 100644 index 0000000000..545d432be3 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/go.mod @@ -0,0 +1 @@ +module github.com/agext/levenshtein diff --git a/vendor/github.com/agext/levenshtein/test.sh b/vendor/github.com/agext/levenshtein/test.sh new file mode 100644 index 0000000000..c5ed72466f --- /dev/null +++ b/vendor/github.com/agext/levenshtein/test.sh @@ -0,0 +1,10 @@ +set -ev + +if [[ "$1" == "goveralls" ]]; then + echo "Testing with goveralls..." + go get github.com/mattn/goveralls + $HOME/gopath/bin/goveralls -service=travis-ci +else + echo "Testing with go test..." 
+ go test -v ./... +fi diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go index a31cdec773..c292db0ce0 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -71,8 +71,13 @@ func Host(base *net.IPNet, num int) (net.IP, error) { if numUint64 > maxHostNum { return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) } - - return insertNumIntoIP(ip, num, 32), nil + var bitlength int + if ip.To4() != nil { + bitlength = 32 + } else { + bitlength = 128 + } + return insertNumIntoIP(ip, num, bitlength), nil } // AddressRange returns the first and last addresses in the given CIDR range. @@ -110,3 +115,96 @@ func AddressCount(network *net.IPNet) uint64 { prefixLen, bits := network.Mask.Size() return 1 << (uint64(bits) - uint64(prefixLen)) } + +//VerifyNoOverlap takes a list subnets and supernet (CIDRBlock) and verifies +//none of the subnets overlap and all subnets are in the supernet +//it returns an error if any of those conditions are not satisfied +func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { + firstLastIP := make([][]net.IP, len(subnets)) + for i, s := range subnets { + first, last := AddressRange(s) + firstLastIP[i] = []net.IP{first, last} + } + for i, s := range subnets { + if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { + return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) + } + for j := i + 1; j < len(subnets); j++ { + first := firstLastIP[j][0] + last := firstLastIP[j][1] + if s.Contains(first) || s.Contains(last) { + return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) + } + } + } + return nil +} + +// PreviousSubnet returns the subnet of the desired mask in the IP space +// just lower than the start of IPNet provided. 
If the IP space rolls over +// then the second return value is true +func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { + startIP := checkIPv4(network.IP) + previousIP := make(net.IP, len(startIP)) + copy(previousIP, startIP) + cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) + previousIP = Dec(previousIP) + previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} + if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { + return previous, true + } + return previous, false +} + +// NextSubnet returns the next available subnet of the desired mask size +// starting for the maximum IP of the offset subnet +// If the IP exceeds the maxium IP then the second return value is true +func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { + _, currentLast := AddressRange(network) + mask := net.CIDRMask(prefixLen, 8*len(currentLast)) + currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} + _, last := AddressRange(currentSubnet) + last = Inc(last) + next := &net.IPNet{IP: last.Mask(mask), Mask: mask} + if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { + return next, true + } + return next, false +} + +//Inc increases the IP by one this returns a new []byte for the IP +func Inc(IP net.IP) net.IP { + IP = checkIPv4(IP) + incIP := make([]byte, len(IP)) + copy(incIP, IP) + for j := len(incIP) - 1; j >= 0; j-- { + incIP[j]++ + if incIP[j] > 0 { + break + } + } + return incIP +} + +//Dec decreases the IP by one this returns a new []byte for the IP +func Dec(IP net.IP) net.IP { + IP = checkIPv4(IP) + decIP := make([]byte, len(IP)) + copy(decIP, IP) + decIP = checkIPv4(decIP) + for j := len(decIP) - 1; j >= 0; j-- { + decIP[j]-- + if decIP[j] < 255 { + break + } + } + return decIP +} + +func checkIPv4(ip net.IP) net.IP { + // Go for some reason allocs IPv6len for IPv4 so we have to correct it + if v4 := ip.To4(); v4 != nil { + return v4 + } + return ip +} diff --git a/vendor/github.com/armon/go-radix/go.mod b/vendor/github.com/armon/go-radix/go.mod new file mode 100644 index 0000000000..4336aa29ea --- /dev/null +++ b/vendor/github.com/armon/go-radix/go.mod @@ -0,0 +1 @@ +module github.com/armon/go-radix diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go index b6432f1a11..645df2450f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -23,28 +23,27 @@ func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { case reflect.Struct: buf.WriteString("{\n") - names := []string{} for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { + ft := v.Type().Field(i) + fv := v.Field(i) + + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { continue // ignore unexported fields } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { continue // ignore unset fields } - names = append(names, name) - } - for i, n := range names { - val := v.FieldByName(n) buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - stringValue(val, indent+2, buf) + buf.WriteString(ft.Name + ": ") - if i < len(names)-1 { - buf.WriteString(",\n") + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("") + } else { + stringValue(fv, indent+2, buf) } + + 
buf.WriteString(",\n") } buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 788fe6e279..7096053840 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -15,6 +15,12 @@ type Config struct { Endpoint string SigningRegion string SigningName string + + // States that the signing name did not come from a modeled source but + // was derived based on other data. Used by service client constructors + // to determine if the signin name can be overridden based on metadata the + // service has. + SigningNameDerived bool } // ConfigProvider provides a generic way for a service client to receive @@ -85,6 +91,6 @@ func (c *Client) AddDebugHandlers() { return } - c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest}) - c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse}) + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 1313478f27..a397b0d044 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,11 +1,11 @@ package client import ( - "math/rand" - "sync" + "strconv" "time" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" ) // DefaultRetryer implements basic retry logic using exponential backoff for @@ -15,11 +15,11 @@ import ( // the MaxRetries method: // // type retryer struct { -// service.DefaultRetryer +// client.DefaultRetryer // } // // // This implementation always has 100 max retries -// func (d retryer) MaxRetries() uint { return 100 } +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } @@ -30,25 +30,27 @@ func (d DefaultRetryer) MaxRetries() int { return d.NumMaxRetries } -var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) - // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // Set the upper limit of delay in retrying at ~five minutes minTime := 30 throttle := d.shouldThrottle(r) if throttle { + if delay, ok := getRetryDelay(r); ok { + return delay + } + minTime = 500 } retryCount := r.RetryCount - if retryCount > 13 { - retryCount = 13 - } else if throttle && retryCount > 8 { + if throttle && retryCount > 8 { retryCount = 8 + } else if retryCount > 13 { + retryCount = 13 } - delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) + delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) return time.Duration(delay) * time.Millisecond } @@ -60,7 +62,7 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { return *r.Retryable } - if r.HTTPResponse.StatusCode >= 500 { + if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { return true } return r.IsErrorRetryable() || d.shouldThrottle(r) @@ -68,29 +70,47 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { // ShouldThrottle returns true if the request should be throttled. 
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - if r.HTTPResponse.StatusCode == 502 || - r.HTTPResponse.StatusCode == 503 || - r.HTTPResponse.StatusCode == 504 { - return true + switch r.HTTPResponse.StatusCode { + case 429: + case 502: + case 503: + case 504: + default: + return r.IsErrorThrottle() } - return r.IsErrorThrottle() -} -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source + return true } -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true } -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index 1f39c91f2e..ce9fb896d9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -44,22 +44,57 @@ func (reader *teeReaderCloser) Close() error { return reader.Source.Close() } +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + func logRequest(r *request.Request) { logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) return } if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's // Body as a NoOpCloser and will not be reset after read by the HTTP // client reader. r.ResetBody() } - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. 
+var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) } const logRespMsg = `DEBUG: Response %s/%s Details: @@ -72,27 +107,44 @@ const logRespErrMsg = `DEBUG ERROR: Response %s/%s: %s -----------------------------------------------------` +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + func logResponse(r *request.Request) { lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } } handlerFn := func(req *request.Request) { - body, err := httputil.DumpResponse(req.HTTPResponse, false) + b, err := httputil.DumpResponse(req.HTTPResponse, false) if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) return } - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body))) - if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) { + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + lw.Logger.Log(string(b)) } } @@ -106,3 +158,27 @@ func logResponse(r *request.Request) { Name: handlerName, Fn: handlerFn, }) } + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
+var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 4778056ddf..920e9fddf8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -3,6 +3,7 @@ package metadata // ClientInfo wraps immutable data from the client.Client structure. type ClientInfo struct { ServiceName string + ServiceID string APIVersion string Endpoint string SigningName string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index d1f31f1c65..10634d173d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -18,7 +18,7 @@ const UseServiceDefaultRetries = -1 type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, -// all clients will use the defaults.DefaultConfig tructure. +// all clients will use the defaults.DefaultConfig structure. // // // Create Session with MaxRetry configuration to be shared by multiple // // service clients. @@ -45,8 +45,8 @@ type Config struct { // that overrides the default generated endpoint for a client. Set this // to `""` to use the default generated endpoint. // - // @note You must still provide a `Region` value when specifying an - // endpoint for a client. + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. Endpoint *string // The resolver to use for looking up endpoints for AWS service clients @@ -65,8 +65,8 @@ type Config struct { // noted. A full list of regions is found in the "Regions and Endpoints" // document. // - // @see http://docs.aws.amazon.com/general/latest/gr/rande.html - // AWS Regions and Endpoints + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. Region *string // Set this to `true` to disable SSL when sending requests. Defaults @@ -95,7 +95,7 @@ type Config struct { // recoverable failures. // // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. + // the client.DefaultRetryer will be used. // // When both Retryer and MaxRetries are non-nil, the former is used and // the latter ignored. @@ -120,9 +120,10 @@ type Config struct { // will use virtual hosted bucket addressing when possible // (`http://BUCKET.s3.amazonaws.com/KEY`). // - // @note This configuration option is specific to the Amazon S3 service. - // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // Amazon S3: Virtual Hosting of Buckets + // Note: This configuration option is specific to the Amazon S3 service. 
+ // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets S3ForcePathStyle *bool // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` @@ -151,6 +152,15 @@ type Config struct { // with accelerate. S3UseAccelerate *bool + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the // EC2Metadata client to create a new http.Client. This options is only @@ -168,7 +178,7 @@ type Config struct { // EC2MetadataDisableTimeoutOverride *bool - // Instructs the endpiont to be generated for a service client to + // Instructs the endpoint to be generated for a service client to // be the dual stack endpoint. The dual stack endpoint will support // both IPv4 and IPv6 addressing. // @@ -214,6 +224,28 @@ type Config struct { // Key: aws.String("//foo//bar//moo"), // }) DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool } // NewConfig returns a new Config pointer that can be chained with builder @@ -336,6 +368,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config { func (c *Config) WithS3UseAccelerate(enable bool) *Config { c.S3UseAccelerate = &enable return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + } // WithUseDualStack sets a config UseDualStack value returning a Config @@ -359,6 +400,19 @@ func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { return c } +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + // MergeIn merges the passed in configs into the existing config object. 
func (c *Config) MergeIn(cfgs ...*Config) { for _, other := range cfgs { @@ -435,6 +489,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3UseAccelerate = other.S3UseAccelerate } + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + if other.UseDualStack != nil { dst.UseDualStack = other.UseDualStack } @@ -454,6 +512,14 @@ func mergeInConfig(dst *Config, other *Config) { if other.EnforceShouldRetryCheck != nil { dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go similarity index 58% rename from vendor/github.com/aws/aws-sdk-go/aws/context.go rename to vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go index 79f426853b..2866f9a7fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go @@ -1,8 +1,8 @@ +// +build !go1.9 + package aws -import ( - "time" -) +import "time" // Context is an copy of the Go v1.7 stdlib's context.Context interface. // It is represented as a SDK interface to enable you to use the "WithContext" @@ -35,37 +35,3 @@ type Context interface { // functions. Value(key interface{}) interface{} } - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return backgroundCtx -} - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// Expects Context to always return a non-nil error if the Done channel is closed. -func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go deleted file mode 100644 index e8cf93d269..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !go1.7 - -package aws - -import "time" - -// An emptyCtx is a copy of the the Go 1.7 context.emptyCtx type. This -// is copied to provide a 1.6 and 1.5 safe version of context that is compatible -// with Go 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. 
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case backgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -var ( - backgroundCtx = new(emptyCtx) -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go deleted file mode 100644 index 064f75c925..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.7 - -package aws - -import "context" - -var ( - backgroundCtx = context.Background() -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go new file mode 100644 index 0000000000..3718b26e10 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go @@ -0,0 +1,11 @@ +// +build go1.9 + +package aws + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go new file mode 100644 index 0000000000..66c5945db1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -0,0 +1,56 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. 
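With the go1.9 type alias introduced above, stdlib contexts satisfy aws.Context directly; a minimal sketch of what that enables, using the SleepWithContext helper relocated into context_sleep.go below (assumes Go 1.9+):

	package main

	import (
		"context"
		"fmt"
		"time"

		"github.com/aws/aws-sdk-go/aws"
	)

	func main() {
		// On Go 1.9+ aws.Context is an alias for context.Context, so no
		// adapter type is needed to pass stdlib contexts to the SDK.
		var ctx aws.Context = context.Background()
		err := aws.SleepWithContext(ctx, 10*time.Millisecond)
		fmt.Println(err) // <nil>: the timer expired before any cancellation
	}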
+func BackgroundContext() Context {
+	return backgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
new file mode 100644
index 0000000000..9c29f29af1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+	return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
new file mode 100644
index 0000000000..304fd15612
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+	"time"
+)
+
+// SleepWithContext will wait for the timer duration to expire, or for the
+// context to be canceled, whichever happens first. If the context is canceled
+// the Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
+func SleepWithContext(ctx Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		break
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
index 3b73a7da7f..ff5d58e068 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -311,6 +311,24 @@ func TimeValue(v *time.Time) time.Time {
 	return time.Time{}
 }
 
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix((*v / 1000), 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix(0, (*v * 1000000))
+	}
+	return time.Time{}
+}
+
 // TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
 // The result is undefined if the Unix time cannot be represented by an int64.
 // Which includes calling TimeUnixMilli on a zero Time is undefined.
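A short, hypothetical sketch of the two new pointer-to-time helpers added above (the timestamp value is a placeholder):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
	)

	func main() {
		ms := aws.Int64(1555612305000) // milliseconds since the epoch
		fmt.Println(aws.MillisecondsTimeValue(ms).UTC())

		// Both helpers are nil-safe and return the zero time.Time for nil.
		fmt.Println(aws.MillisecondsTimeValue(nil).IsZero()) // true
	}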
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 495e3ef62c..f8853d78af 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -3,12 +3,10 @@ package corehandlers import ( "bytes" "fmt" - "io" "io/ioutil" "net/http" "net/url" "regexp" - "runtime" "strconv" "time" @@ -36,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { length, _ = strconv.ParseInt(slength, 10, 64) } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } } } @@ -60,13 +53,6 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen } }} -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. -var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - var reStatusCode = regexp.MustCompile(`^(\d{3})`) // ValidateReqSigHandler is a request handler to ensure that the request's @@ -86,9 +72,9 @@ var ValidateReqSigHandler = request.NamedHandler{ signedTime = r.LastSignedAt } - // 10 minutes to allow for some clock skew/delays in transmission. + // 5 minutes to allow for some clock skew/delays in transmission. // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(10 * time.Minute).After(time.Now()) { + if signedTime.Add(5 * time.Minute).After(time.Now()) { return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 0000000000..ab69c7a6f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec-env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. 
+var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go index f298d65962..3ad1e798df 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -9,9 +9,7 @@ var ( // providers in the ChainProvider. // // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true - // - // @readonly + // aws.Config.CredentialsChainVerboseErrors to true. ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", `no valid providers in chain. Deprecated. For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 42416fc2f0..894bbc7f82 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -49,6 +49,8 @@ package credentials import ( + "fmt" + "github.com/aws/aws-sdk-go/aws/awserr" "sync" "time" ) @@ -64,8 +66,6 @@ import ( // Credentials: credentials.AnonymousCredentials, // }))) // // Access public S3 buckets. -// -// @readonly var AnonymousCredentials = NewStaticCredentials("", "", "") // A Value is the AWS credentials value for individual credential fields. @@ -99,6 +99,14 @@ type Provider interface { IsExpired() bool } +// An Expirer is an interface that Providers can implement to expose the expiration +// time, if known. If the Provider cannot accurately provide this info, +// it should not implement this interface. +type Expirer interface { + // The time at which the credentials are no longer valid + ExpiresAt() time.Time +} + // An ErrorProvider is a stub credentials provider that always returns an error // this is used by the SDK when construction a known provider is not possible // due to an error. @@ -158,13 +166,19 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { // IsExpired returns if the credentials are expired. func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now } - return e.expiration.Before(e.CurrentTime()) + return e.expiration.Before(curTime()) +} + +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration } -// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// A Credentials provides concurrency safe retrieval of AWS credentials Value. // Credentials will cache the credentials value until they expire. Once the value // expires the next Get will attempt to retrieve valid credentials. // @@ -178,7 +192,8 @@ func (e *Expiry) IsExpired() bool { type Credentials struct { creds Value forceRefresh bool - m sync.Mutex + + m sync.RWMutex provider Provider } @@ -201,6 +216,17 @@ func NewCredentials(provider Provider) *Credentials { // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. 
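To illustrate the new Expirer interface and the Credentials.ExpiresAt accessor added in this hunk, a minimal sketch; static credentials are used only because their provider never expires and so does not implement Expirer (key values are placeholders):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/credentials"
	)

	func main() {
		creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
		if _, err := creds.ExpiresAt(); err != nil {
			// StaticProvider does not implement Expirer, so ExpiresAt
			// returns a ProviderNotExpirer error rather than a time.
			fmt.Println(err)
		}
	}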
func (c *Credentials) Get() (Value, error) { + // Check the cached credentials first with just the read lock. + c.m.RLock() + if !c.isExpired() { + creds := c.creds + c.m.RUnlock() + return creds, nil + } + c.m.RUnlock() + + // Credentials are expired need to retrieve the credentials taking the full + // lock. c.m.Lock() defer c.m.Unlock() @@ -234,8 +260,8 @@ func (c *Credentials) Expire() { // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { - c.m.Lock() - defer c.m.Unlock() + c.m.RLock() + defer c.m.RUnlock() return c.isExpired() } @@ -244,3 +270,23 @@ func (c *Credentials) IsExpired() bool { func (c *Credentials) isExpired() bool { return c.forceRefresh || c.provider.IsExpired() } + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. +func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), + nil) + } + if c.forceRefresh { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index c39749524e..0ed791be64 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -4,7 +4,6 @@ import ( "bufio" "encoding/json" "fmt" - "path" "strings" "time" @@ -12,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/internal/sdkuri" ) // ProviderName provides a name of EC2Role provider @@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct { Message string } -const iamSecurityCredsPath = "/iam/security-credentials" +const iamSecurityCredsPath = "iam/security-credentials/" // requestCredList requests a list of credentials from the EC2 service. // If there are no credentials, or there is an error making or receiving the request @@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { // If the credentials cannot be found, or there is an error reading the response // and error will be returned. func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index a4cec5c553..ace5131382 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -65,6 +65,10 @@ type Provider struct { // // If ExpiryWindow is 0 or less it will be ignored. 
ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string } // NewProviderClient returns a credentials Provider for retrieving AWS credentials @@ -152,6 +156,9 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) { out := &getCredentialsOutput{} req := p.Client.NewRequest(op, nil, out) req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } return out, req.Send() } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go index c14231a16f..54c5cf7333 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -12,14 +12,10 @@ const EnvProviderName = "EnvProvider" var ( // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be // found in the process's environment. - // - // @readonly ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key // can't be found in the process's environment. - // - // @readonly ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) ) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 0000000000..1980c8c140 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,425 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. + + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. + svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. 
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+You can set a non-default timeout for the `credential_process` with another
+constructor, `processcreds.NewCredentialsTimeout()`, providing the timeout. For
+example, to set a 500 millisecond timeout:
+
+	// Create credentials using the ProcessProvider.
+	creds := processcreds.NewCredentialsTimeout(
+		"/path/to/command",
+		time.Duration(500) * time.Millisecond)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+
+	creds := processcreds.NewCredentials(
+		"/path/to/command",
+		func(opt *ProcessProvider) {
+			opt.Timeout = time.Duration(2) * time.Minute
+			opt.Duration = time.Duration(60) * time.Minute
+			opt.MaxBufSize = 2048
+		})
+
+You can also use your own `exec.Cmd`:
+
+	// Create an exec.Cmd
+	myCommand := exec.Command("/path/to/command")
+
+	// Create credentials using your exec.Cmd and custom timeout
+	creds := processcreds.NewCredentialsCommand(
+		myCommand,
+		func(opt *processcreds.ProcessProvider) {
+			opt.Timeout = time.Duration(1) * time.Second
+		})
+*/
+package processcreds
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+const (
+	// ProviderName is the name this credentials provider will label any
+	// returned credentials Value with.
+	ProviderName = `ProcessProvider`
+
+	// ErrCodeProcessProviderParse error parsing process output
+	ErrCodeProcessProviderParse = "ProcessProviderParseError"
+
+	// ErrCodeProcessProviderVersion version error in output
+	ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
+
+	// ErrCodeProcessProviderRequired required attribute missing in output
+	ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
+
+	// ErrCodeProcessProviderExecution execution of command failed
+	ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
+
+	// errMsgProcessProviderTimeout process took longer than allowed
+	errMsgProcessProviderTimeout = "credential process timed out"
+
+	// errMsgProcessProviderProcess process error
+	errMsgProcessProviderProcess = "error in credential_process"
+
+	// errMsgProcessProviderParse problem parsing output
+	errMsgProcessProviderParse = "parse failed of credential_process output"
+
+	// errMsgProcessProviderVersion version error in output
+	errMsgProcessProviderVersion = "wrong version in process output (not 1)"
+
+	// errMsgProcessProviderMissKey missing access key id in output
+	errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+	// errMsgProcessProviderMissSecret missing secret access key in output
+	errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+	// errMsgProcessProviderPrepareCmd prepare of command failed
+	errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+	// errMsgProcessProviderEmptyCmd command must not be empty
+	errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+	// errMsgProcessProviderPipe failed to initialize pipe
+	errMsgProcessProviderPipe = "failed to initialize pipe"
+
+	// DefaultDuration is the default amount of time in minutes that the
+	// credentials will be valid for.
+	DefaultDuration = time.Duration(15) * time.Minute
+
+	// DefaultBufSize limits buffer size from growing to an enormous
+	// amount due to a faulty process.
+	DefaultBufSize = 1024
+
+	// DefaultTimeout default limit on time a process can run.
+	DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+	staticCreds bool
+	credentials.Expiry
+	originalCommand []string
+
+	// Expiry duration of the credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause request to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// The os command to be executed, which should return a JSON with
+	// credential information on its stdout.
+	command *exec.Cmd
+
+	// MaxBufSize limits memory usage from growing to an enormous
+	// amount due to a faulty process.
+	MaxBufSize int
+
+	// Timeout limits the time a process can run.
+	Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
+	p := &ProcessProvider{
+		command:    exec.Command(command),
+		Duration:   DefaultDuration,
+		Timeout:    DefaultTimeout,
+		MaxBufSize: DefaultBufSize,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsTimeout returns a pointer to a new Credentials object with
+// the specified command and timeout, and default duration and max buffer size.
+func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
+	p := NewCredentials(command, func(opt *ProcessProvider) {
+		opt.Timeout = timeout
+	})
+
+	return p
+}
+
+// NewCredentialsCommand returns a pointer to a new Credentials object with
+// the specified command, and default timeout, duration and max buffer size.
+func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
+	p := &ProcessProvider{
+		command:    command,
+		Duration:   DefaultDuration,
+		Timeout:    DefaultTimeout,
+		MaxBufSize: DefaultBufSize,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+type credentialProcessResponse struct {
+	Version         int
+	AccessKeyID     string `json:"AccessKeyId"`
+	SecretAccessKey string
+	SessionToken    string
+	Expiration      *time.Time
+}
+
+// Retrieve executes the 'credential_process' and returns the credentials.
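For reference, a hypothetical sketch of the version-1 payload shape that Retrieve expects on the process's stdout, decoded the same way as the unexported credentialProcessResponse struct above (key values are placeholders):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	func main() {
		// Mirrors the shape of credentialProcessResponse.
		var resp struct {
			Version         int
			AccessKeyID     string `json:"AccessKeyId"`
			SecretAccessKey string
			SessionToken    string
		}
		payload := []byte(`{"Version":1,"AccessKeyId":"AKID","SecretAccessKey":"SECRET"}`)
		if err := json.Unmarshal(payload, &resp); err != nil {
			panic(err)
		}
		// Retrieve rejects anything other than Version == 1 or empty keys.
		fmt.Println(resp.Version == 1 && resp.AccessKeyID != "")
	}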
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. +func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. 
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index 51e21e0f38..e155149581 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -4,9 +4,8 @@ import ( "fmt" "os" - "github.com/go-ini/ini" - "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/ini" "github.com/aws/aws-sdk-go/internal/shareddefaults" ) @@ -77,36 +76,37 @@ func (p *SharedCredentialsProvider) IsExpired() bool { // The credentials retrieved from the profile will be returned or error. Error will be // returned if it fails to read from the file, or the data is invalid. 
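The loadProfile rewrite below swaps go-ini for the SDK's internal ini package; callers continue to go through the unchanged public API, e.g. this minimal sketch (the profile name is a placeholder):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/credentials"
	)

	func main() {
		// An empty filename falls back to the default shared credentials
		// file location (~/.aws/credentials on Linux and macOS).
		creds := credentials.NewSharedCredentials("", "default")
		v, err := creds.Get()
		fmt.Println(v.ProviderName, err)
	}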
func loadProfile(filename, profile string) (Value, error) {
-	config, err := ini.Load(filename)
+	config, err := ini.OpenFile(filename)
 	if err != nil {
 		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
 	}
-	iniProfile, err := config.GetSection(profile)
-	if err != nil {
-		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
+
+	iniProfile, ok := config.GetSection(profile)
+	if !ok {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
 	}
 
-	id, err := iniProfile.GetKey("aws_access_key_id")
-	if err != nil {
+	id := iniProfile.String("aws_access_key_id")
+	if len(id) == 0 {
 		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
 			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
-			err)
+			nil)
 	}
 
-	secret, err := iniProfile.GetKey("aws_secret_access_key")
-	if err != nil {
+	secret := iniProfile.String("aws_secret_access_key")
+	if len(secret) == 0 {
 		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
 			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
 			nil)
 	}
 
 	// Default to empty string if not found
-	token := iniProfile.Key("aws_session_token")
+	token := iniProfile.String("aws_session_token")
 
 	return Value{
-		AccessKeyID:     id.String(),
-		SecretAccessKey: secret.String(),
-		SessionToken:    token.String(),
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
 		ProviderName:    SharedCredsProviderName,
 	}, nil
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
index 4f5dab3fcc..531139e397 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -9,8 +9,6 @@ const StaticProviderName = "StaticProvider"
 
 var (
 	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
-	//
-	// @readonly
 	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
 )
 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 0000000000..152d785b36
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,46 @@
+// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
+// via UDP connection. Using the Start function will enable the reporting of
+// metrics on a given port. If Start is called again with different parameters,
+// a panic will occur.
+//
+// Pause can be called to pause any metrics publishing on a given port. Sessions
+// that have had their handlers modified via InjectHandlers may still be used.
+// However, the handlers will act as a no-op, meaning no metrics will be published.
+//
+// Example:
+//	r, err := csm.Start("clientID", ":31000")
+//	if err != nil {
+//		panic(fmt.Errorf("failed starting CSM: %v", err))
+//	}
+//
+//	sess, err := session.NewSession(&aws.Config{})
+//	if err != nil {
+//		panic(fmt.Errorf("failed loading session: %v", err))
+//	}
+//
+//	r.InjectHandlers(&sess.Handlers)
+//
+//	client := s3.New(sess)
+//	resp, err := client.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key: aws.String("key"),
+//	})
+//
+//	// Will pause monitoring
+//	r.Pause()
+//	resp, err = client.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key: aws.String("key"),
+//	})
+//
+//	// Resume monitoring
+//	r.Continue()
+//
+// Start returns a Reporter that is used to enable or disable monitoring. If
+// access to the Reporter is required later, calling Get will return the Reporter
+// singleton.
+//
+// Example:
+//	r := csm.Get()
+//	r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 0000000000..2f0c6eac9a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,67 @@
+package csm
+
+import (
+	"fmt"
+	"sync"
+)
+
+var (
+	lock sync.Mutex
+)
+
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// Start will start a long running go routine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once and will panic if a different
+// client ID or port is passed in.
+//
+// Example:
+//	r, err := csm.Start("clientID", "127.0.0.1:8094")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//	sess := session.NewSession()
+//	r.InjectHandlers(sess.Handlers)
+//
+//	svc := s3.New(sess)
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key: aws.String("key"),
+//	})
+func Start(clientID string, url string) (*Reporter, error) {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if sender == nil {
+		sender = newReporter(clientID, url)
+	} else {
+		if sender.clientID != clientID {
+			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+		}
+
+		if sender.url != url {
+			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+		}
+	}
+
+	if err := connect(url); err != nil {
+		sender = nil
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+// Get will return a reporter if one exists; if one does not exist, nil will
+// be returned.
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 0000000000..5bacc791a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + 
m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 0000000000..514fc3739a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,54 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(&ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(&ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(&ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 0000000000..54a99280ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 0000000000..0b5571acfb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,260 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // DefaultPort is used when no port is specified + DefaultPort = "31000" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. 
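The metricChan.Push added above is deliberately a non-blocking send, so a full buffer drops metrics instead of stalling request handling. A standalone sketch of the same pattern, for illustration only:

	package main

	import "fmt"

	func main() {
		ch := make(chan int, 1)
		push := func(v int) bool {
			select {
			case ch <- v:
				return true
			default:
				return false // buffer full: drop rather than block
			}
		}
		fmt.Println(push(1), push(2)) // true false with a one-element buffer
	}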
+type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptCount: aws.Int(r.RetryCount + 1), + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case "RequestError", + "SerializationError", + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log
+			b, err := json.Marshal(m)
+			if err != nil {
+				continue
+			}
+
+			rep.conn.Write(b)
+		}
+	}
+}
+
+// Pause will pause the metric channel, preventing any new metrics from
+// being added.
+func (rep *Reporter) Pause() {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if rep == nil {
+		return
+	}
+
+	rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring
+// to be resumed.
+func (rep *Reporter) Continue() {
+	lock.Lock()
+	defer lock.Unlock()
+	if rep == nil {
+		return
+	}
+
+	if !rep.metricsCh.IsPaused() {
+		return
+	}
+
+	rep.metricsCh.Continue()
+}
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// Example:
+//	// Start must be called in order to inject the correct handlers
+//	r, err := csm.Start("clientID", "127.0.0.1:8094")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//
+//	sess := session.NewSession()
+//	r.InjectHandlers(&sess.Handlers)
+//
+//	// create a new service client with our client side metric session
+//	svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+	if rep == nil {
+		return
+	}
+
+	handlers.Complete.PushFrontNamed(request.NamedHandler{
+		Name: APICallMetricHandlerName,
+		Fn:   rep.sendAPICallMetric,
+	})
+
+	handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+		Name: APICallAttemptMetricHandlerName,
+		Fn:   rep.sendAPICallAttemptMetric,
+	})
+}
+
+// boolIntValue return 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+	if b {
+		return 1
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index 07afe3b8e6..23bb639e01 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -9,6 +9,7 @@
 package defaults
 
 import (
 	"fmt"
+	"net"
 	"net/http"
 	"net/url"
 	"os"
@@ -23,6 +24,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/endpoints"
 	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
 )
 
 // A Defaults provides a collection of default values for SDK clients.
@@ -72,6 +74,7 @@ func Handlers() request.Handlers {
 	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
 	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
 	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
 	handlers.Build.AfterEachFn = request.HandlerListStopOnError
 	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
 	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
@@ -90,17 +93,28 @@ func Handlers() request.Handlers {
 func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
 	return credentials.NewCredentials(&credentials.ChainProvider{
 		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
-		Providers: []credentials.Provider{
-			&credentials.EnvProvider{},
-			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
-			RemoteCredProvider(*cfg, handlers),
-		},
+		Providers:     CredProviders(cfg, handlers),
 	})
 }
 
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// For applications that need to use some other provider (for example, use
+// different environment variables for legacy reasons) but still fall back
+// on the default chain of providers, this allows that default chain to be
+// automatically updated.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+	return []credentials.Provider{
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		RemoteCredProvider(*cfg, handlers),
+	}
+}
+
 const (
-	httpProviderEnvVar     = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
-	ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+	httpProviderEnvVar              = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
 )
 
 // RemoteCredProvider returns a credentials provider for the default remote
@@ -110,22 +124,51 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
 		return localHTTPCredProvider(cfg, handlers, u)
 	}
 
-	if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
-		u := fmt.Sprintf("http://169.254.170.2%s", uri)
+	if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
+		u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
 		return httpCredProvider(cfg, handlers, u)
 	}
 
 	return ec2RoleProvider(cfg, handlers)
 }
 
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+	ip := net.ParseIP(host)
+	if ip != nil {
+		return ip.IsLoopback(), nil
+	}
+
+	// Host is not an ip, perform lookup
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+	for _, addr := range addrs {
+		if !net.ParseIP(addr).IsLoopback() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
 func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
 	var errMsg string
 
 	parsed, err := url.Parse(u)
 	if err != nil {
 		errMsg = fmt.Sprintf("invalid URL, %v", err)
-	} else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") {
-		errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host)
+	} else {
+		host := aws.URLHostname(parsed)
+		if len(host) == 0 {
+			errMsg = "unable to parse host from local HTTP cred provider URL"
+		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+			errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
+		} else if !isLoopback {
+			errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+		}
 	}
 
 	if len(errMsg) > 0 {
@@ -145,6 +188,7 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede
 	return endpointcreds.NewProviderClient(cfg, handlers, u,
 		func(p *endpointcreds.Provider) {
 			p.ExpiryWindow = 5 * time.Minute
+			p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
 		},
 	)
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
index 984407a580..88e2fc7073 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -4,12 +4,12 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"path"
 	"strings"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkuri"
 )
 
 // GetMetadata uses the path provided to request information from the EC2
@@ -19,7 +19,7 @@ func (c *EC2Metadata) GetMetadata(p string)
(string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", - HTTPPath: path.Join("/", "meta-data", p), + HTTPPath: sdkuri.PathJoin("/meta-data", p), } output := &metadataOutput{} @@ -35,7 +35,7 @@ func (c *EC2Metadata) GetUserData() (string, error) { op := &request.Operation{ Name: "GetUserData", HTTPMethod: "GET", - HTTPPath: path.Join("/", "user-data"), + HTTPPath: "/user-data", } output := &metadataOutput{} @@ -56,7 +56,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { op := &request.Operation{ Name: "GetDynamicData", HTTPMethod: "GET", - HTTPPath: path.Join("/", "dynamic", p), + HTTPPath: sdkuri.PathJoin("/dynamic", p), } output := &metadataOutput{} @@ -118,6 +118,10 @@ func (c *EC2Metadata) Region() (string, error) { return "", err } + if len(resp) == 0 { + return "", awserr.New("EC2MetadataError", "invalid Region response", nil) + } + // returns region without the suffix, e.g. us-west-2a becomes us-west-2 return resp[:len(resp)-1], nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index 5b4379dbd8..7d1f66e4e8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -1,5 +1,10 @@ // Package ec2metadata provides the client for making API calls to the // EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true" (case insensitive). While the +// variable is set to true, the SDK disables the EC2 Metadata client and the +// client cannot be used. package ec2metadata import ( @@ -7,17 +12,21 @@ import ( "errors" "io" "net/http" + "os" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/request" ) // ServiceName is the name of the service. const ServiceName = "ec2metadata" +const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" // An EC2Metadata is an EC2 Metadata service Client. type EC2Metadata struct { @@ -63,6 +72,7 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceName, Endpoint: endpoint, APIVersion: "latest", }, @@ -75,6 +85,21 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio svc.Handlers.Validate.Clear() svc.Handlers.Validate.PushBack(validateEndpointHandler) + // Disable the EC2 Metadata service if the environment variable is set. + // This short circuits the service's functionality to always fail to send + // requests.
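+ // For example (illustrative note, not part of the upstream patch): a client + // created while AWS_EC2_METADATA_DISABLED=true is exported will fail every + // request with request.CanceledErrorCode instead of contacting 169.254.169.254.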
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + // Add additional options to the service config for _, option := range opts { option(svc.Client) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 74f72de073..87b9ff3ffe 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -84,6 +84,8 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol custAddEC2Metadata(p) custAddS3DualStack(p) custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) } return ps, nil @@ -94,7 +96,12 @@ func custAddS3DualStack(p *partition) { return } - s, ok := p.Services["s3"] + custAddDualstack(p, "s3") + custAddDualstack(p, "s3-control") +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] if !ok { return } @@ -102,7 +109,7 @@ func custAddS3DualStack(p *partition) { s.Defaults.HasDualStack = boxedTrue s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - p.Services["s3"] = s + p.Services[svcName] = s } func custAddEC2Metadata(p *partition) { @@ -122,6 +129,54 @@ func custRmIotDataService(p *partition) { delete(p.Services, "data.iot") } +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := expectHostname, s.Defaults.Hostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) + return + } + + s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + type decodeModelError struct { awsError } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index d6d87e446c..96ddf8fcac 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -22,8 +22,10 @@ const ( ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). CaCentral1RegionID = "ca-central-1" // Canada (Central). EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuNorth1RegionID = "eu-north-1" // EU (Stockholm). EuWest1RegionID = "eu-west-1" // EU (Ireland). EuWest2RegionID = "eu-west-2" // EU (London).
+ EuWest3RegionID = "eu-west-3" // EU (Paris). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -33,116 +35,16 @@ const ( // AWS China partition's regions. const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). ) // AWS GovCloud (US) partition's regions. const ( + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). ) -// Service identifiers -const ( - AcmServiceID = "acm" // Acm. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. 
- LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - S3ServiceID = "s3" // S3. - SdbServiceID = "sdb" // Sdb. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) - // DefaultResolver returns an Endpoint resolver that will be able // to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). 
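// // A minimal usage sketch (editor's illustration, not part of the upstream doc // comment; "s3" and "us-west-2" are just sample inputs): // // resolver := endpoints.DefaultResolver() // ep, err := resolver.EndpointFor("s3", "us-west-2") // if err == nil { // fmt.Println(ep.URL) // }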
// @@ -210,12 +112,18 @@ var awsPartition = partition{ "eu-central-1": region{ Description: "EU (Frankfurt)", }, + "eu-north-1": region{ + Description: "EU (Stockholm)", + }, "eu-west-1": region{ Description: "EU (Ireland)", }, "eu-west-2": region{ Description: "EU (London)", }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, "sa-east-1": region{ Description: "South America (Sao Paulo)", }, @@ -233,6 +141,12 @@ var awsPartition = partition{ }, }, Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "acm": service{ Endpoints: endpoints{ @@ -243,8 +157,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -252,74 +168,148 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "apigateway": service{ - + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", + "api.ecr": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + 
"us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, }, }, + }, + "api.mediatailor": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "appstream2": service{ + "api.pricing": service{ Defaults: endpoint{ - Protocols: []string{"https"}, CredentialScope: credentialScope{ - Service: "appstream", + Service: "pricing", }, }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, }, }, - "athena": service{ + "api.sagemaker": service{ - Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -330,47 +320,37 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "budgets.amazonaws.com", + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudformation": service{ + "apigateway": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -380,8 +360,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + 
"eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -389,37 +371,40 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", }, }, - }, - "cloudhsm": service{ - Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cloudsearch": service{ - + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -427,13 +412,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cloudtrail": service{ + "appsync": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -441,45 +424,60 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "codebuild": service{ + "athena": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "codecommit": service{ - + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "codedeploy": service{ - + "autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": 
endpoint{}, @@ -490,73 +488,92 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "codepipeline": service{ + "batch": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "codestar": service{ + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-identity": service{ - + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, - "cognito-idp": service{ + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, + "ap-southeast-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "cognito-sync": service{ + "clouddirectory": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -565,7 +582,7 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "config": service{ + "cloudformation": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -575,8 +592,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -584,30 +603,41 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "cur": service{ + "cloudfront": service{ + PartitionEndpoint: "aws-global", + 
IsRegionalized: boxedFalse, Endpoints: endpoints{ - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, - "datapipeline": service{ + "cloudhsm": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, }, - }, - "directconnect": service{ - Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -616,22 +646,32 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "discovery": service{ + "cloudsearch": service{ Endpoints: endpoints{ - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "dms": service{ + "cloudtrail": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -641,8 +681,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -650,26 +692,52 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "ds": service{ + "codebuild": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "codecommit": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -680,11 
+748,11 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ca-central-1", }, }, "sa-east-1": endpoint{}, @@ -694,10 +762,8 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "codedeploy": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -706,46 +772,66 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, }, }, }, - "ecr": service{ + "codepipeline": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "ecs": service{ + "codestar": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -758,7 +844,7 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "elasticache": service{ + "cognito-identity": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -770,14 +856,12 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "elasticbeanstalk": service{ + "cognito-idp": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -789,27 +873,44 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "elasticfilesystem": service{ + 
"cognito-sync": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "elasticloadbalancing": service{ + "comprehend": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, + }, + "config": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -818,8 +919,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -827,75 +930,45 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"http", "https"}, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, }, + }, + "datapipeline": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elastictranscoder": service{ + "datasync": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "email": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "es": service{ + "dax": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -903,7 +976,13 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "events": service{ + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ 
-913,8 +992,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -922,114 +1003,115 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "firehose": service{ + "discovery": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "gamelift": service{ + "dms": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "health": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, + "docdb": service{ Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", }, }, }, }, - "inspector": service{ + "ds": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "iot": service{ + "dynamodb": service{ Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-3": 
endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "kinesis": service{ - + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1038,8 +1120,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1047,15 +1131,18 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "kinesisanalytics": service{ + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, }, }, - "kms": service{ + "ecs": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1065,8 +1152,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1074,7 +1163,34 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "lambda": service{ + "elasticache": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1082,20 +1198,24 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "lightsail": service{ + "elasticfilesystem": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -1103,11 +1223,14 @@ var awsPartition = partition{ "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "logs": service{ - + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, 
"ap-northeast-2": endpoint{}, @@ -1116,8 +1239,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1125,24 +1250,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "metering.marketplace": service{ + "elasticmapreduce": service{ Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1151,36 +1262,55 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "mobileanalytics": service{ + "email": service{ Endpoints: endpoints{ + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "models.lex": service{ + "entitlement.marketplace": service{ Defaults: endpoint{ CredentialScope: credentialScope{ - Service: "lex", + Service: "aws-marketplace", }, }, Endpoints: endpoints{ "us-east-1": endpoint{}, }, }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "es": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1189,26 +1319,24 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, }, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "opsworks": service{ + "events": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1216,9 +1344,12 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": 
endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1226,7 +1357,762 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "opsworks-cm": service{ + "firehose": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": 
endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: 
endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + 
"us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ Endpoints: endpoints{ "eu-west-1": endpoint{}, @@ -1234,39 +2120,299 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "organizations": service{ + "route53": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, Endpoints: endpoints{ "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: 
"{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-east-1", }, }, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-east-2-fips": endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + 
SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-west-2-fips": endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "pinpoint": service{ + "sdb": service{ Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, }, - }, - "polly": service{ - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "rds": service{ + "secretsmanager": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1278,16 +2424,39 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "redshift": service{ + "securityhub": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1299,6 +2468,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1306,138 +2476,124 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "rekognition": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "s3": service{ - PartitionEndpoint: "us-east-1", - IsRegionalized: boxedTrue, + "serverlessrepo": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + Protocols: []string{"https"}, }, Endpoints: endpoints{ "ap-northeast-1": endpoint{ - Hostname: 
"s3-ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, }, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{ - Hostname: "s3-ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + Protocols: []string{"https"}, }, "ap-southeast-2": endpoint{ - Hostname: "s3-ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, "eu-west-1": endpoint{ - Hostname: "s3-eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + Protocols: []string{"https"}, }, - "eu-west-2": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, }, "sa-east-1": endpoint{ - Hostname: "s3-sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + Protocols: []string{"https"}, }, "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, }, - "us-east-2": endpoint{}, "us-west-1": endpoint{ - Hostname: "s3-us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + Protocols: []string{"https"}, }, "us-west-2": endpoint{ - Hostname: "s3-us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + Protocols: []string{"https"}, }, }, }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, + "servicecatalog": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "servicecatalog": service{ + "servicediscovery": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": 
endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, "shield": service{ IsRegionalized: boxedFalse, Defaults: endpoint{ - SSLCommonName: "Shield.us-east-1.amazonaws.com", + SSLCommonName: "shield.us-east-1.amazonaws.com", Protocols: []string{"https"}, }, Endpoints: endpoints{ @@ -1447,19 +2603,36 @@ var awsPartition = partition{ "sms": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "snowball": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1478,8 +2651,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1500,9 +2675,35 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", }, @@ -1521,8 +2722,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1534,10 +2737,20 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1551,8 +2764,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + 
"eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1575,8 +2790,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -1613,8 +2830,10 @@ var awsPartition = partition{ "aws-global": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -1662,8 +2881,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1681,8 +2902,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1690,6 +2913,54 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "transfer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "waf": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -1707,8 +2978,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1723,14 +2998,28 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "workspaces": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, 
"eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1745,8 +3034,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1781,30 +3072,87 @@ var awscnPartition = partition{ "cn-north-1": region{ Description: "China (Beijing)", }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, }, Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "cloudformation": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "cloudtrail": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "codedeploy": service{ + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: endpoints{ "cn-north-1": endpoint{}, }, @@ -1812,13 +3160,29 @@ var awscnPartition = partition{ "config": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "directconnect": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "dynamodb": service{ @@ -1826,7 +3190,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ec2": service{ @@ -1834,7 +3199,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ec2metadata": service{ @@ -1848,36 +3214,68 @@ var awscnPartition = partition{ }, }, }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "elasticache": service{ 
Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticbeanstalk": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticloadbalancing": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticmapreduce": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "events": service{ + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + Endpoints: endpoints{ "cn-north-1": endpoint{}, }, @@ -1887,7 +3285,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "iam": service{ @@ -1903,16 +3302,35 @@ var awscnPartition = partition{ }, }, }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "logs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "monitoring": service{ @@ -1920,19 +3338,28 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, }, }, "rds": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "redshift": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "s3": service{ @@ -1940,6 +3367,42 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + Endpoints: 
endpoints{ "cn-north-1": endpoint{}, }, @@ -1949,7 +3412,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "sqs": service{ @@ -1958,7 +3422,22 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "storagegateway": service{ @@ -1975,67 +3454,137 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "sts": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, }, - }, - "swf": service{ - Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "tagging": service{ + "athena": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
-func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", - }, - }, - Services: services{ "autoscaling": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, }, }, }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "cloudformation": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -2045,26 +3594,66 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "cloudtrail": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "codedeploy": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "config": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "directconnect": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ds": service{ + Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, @@ -2072,12 +3661,20 @@ var awsusgovPartition = partition{ "dynamodb": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "ec2": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -2092,8 +3689,35 @@ var awsusgovPartition = partition{ }, }, }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "elasticache": service{ + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, @@ -2101,6 +3725,7 @@ var awsusgovPartition = partition{ "elasticloadbalancing": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, }, @@ 
-2109,13 +3734,34 @@ var awsusgovPartition = partition{ "elasticmapreduce": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, + }, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, "events": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "firehose": service{ + Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, @@ -2123,11 +3769,27 @@ var awsusgovPartition = partition{ "glacier": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, }, }, }, + "glue": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -2141,32 +3803,82 @@ var awsusgovPartition = partition{ }, }, }, + "inspector": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "kms": service{ Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "lambda": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "logs": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, "monitoring": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "polly": service{ + Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, @@ -2174,11 +3886,25 @@ var awsusgovPartition = partition{ "rds": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "redshift": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, @@ -2194,21 +3920,70 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, "us-gov-west-1": endpoint{ - Hostname: "s3-us-gov-west-1.amazonaws.com", + Hostname: "s3.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, }, }, + "s3-control": service{ + Defaults: 
endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "snowball": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "sns": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, }, @@ -2217,12 +3992,33 @@ var awsusgovPartition = partition{ "sqs": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ SSLCommonName: "{region}.queue.{dnsSuffix}", Protocols: []string{"http", "https"}, }, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "streams.dynamodb": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -2230,17 +4026,53 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "sts": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "swf": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "workspaces": service{ + Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 0000000000..000dd79eec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. 
+ AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. 
+ KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. 
+) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go index a0e9bc4547..84316b92c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -21,12 +21,12 @@ // partitions := resolver.(endpoints.EnumPartitions).Partitions() // // for _, p := range partitions { -// fmt.Println("Regions for", p.Name) +// fmt.Println("Regions for", p.ID()) // for id, _ := range p.Regions() { // fmt.Println("*", id) // } // -// fmt.Println("Services for", p.Name) +// fmt.Println("Services for", p.ID()) // for id, _ := range p.Services() { // fmt.Println("*", id) // } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 9c3eedb48d..f82babf6f9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -35,7 +35,7 @@ type Options struct { // // If resolving an endpoint on the partition list the provided region will // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unkonwn and resolving + // endpoint ID with. If both the service and region are unknown and resolving // the endpoint on partition list an UnknownEndpointError error will be returned. // // If resolving and endpoint on a partition specific resolver that partition's @@ -206,10 +206,11 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) ( // enumerating over the regions in a partition. func (p Partition) Regions() map[string]Region { rs := map[string]Region{} - for id := range p.p.Regions { + for id, r := range p.p.Regions { rs[id] = Region{ - id: id, - p: p.p, + id: id, + desc: r.Description, + p: p.p, } } @@ -240,6 +241,10 @@ type Region struct { // ID returns the region's identifier. func (r Region) ID() string { return r.id } +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. +func (r Region) Description() string { return r.desc } + // ResolveEndpoint resolves an endpoint from the context of the region given // a service. See Partition.EndpointFor for usage and errors that can be returned. func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { @@ -284,10 +289,11 @@ func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (Resolve func (s Service) Regions() map[string]Region { rs := map[string]Region{} for id := range s.p.Services[s.id].Endpoints { - if _, ok := s.p.Regions[id]; ok { + if r, ok := s.p.Regions[id]; ok { rs[id] = Region{ - id: id, - p: s.p, + id: id, + desc: r.Description, + p: s.p, } } } @@ -347,6 +353,10 @@ type ResolvedEndpoint struct { // The service name that should be used for signing requests. SigningName string + // States that the signing name for this endpoint was derived from metadata + // passed in, but was not explicitly modeled. + SigningNameDerived bool + // The signing method that should be used for signing requests. 
SigningMethod string } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 13d968a249..ff6f76db6e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -226,16 +226,20 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op if len(signingRegion) == 0 { signingRegion = region } + signingName := e.CredentialScope.Service + var signingNameDerived bool if len(signingName) == 0 { signingName = service + signingNameDerived = true } return ResolvedEndpoint{ - URL: u, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go index 05e92df22a..0fdfcc56e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -16,6 +16,10 @@ import ( type CodeGenOptions struct { // Options for how the model will be decoded. DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool } // Set combines all of the option functions together @@ -39,8 +43,16 @@ func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGe return err } + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { return fmt.Errorf("failed to execute template, %v", err) } @@ -166,15 +178,17 @@ import ( "regexp" ) - {{ template "partition consts" . }} + {{ template "partition consts" $.Resolver }} - {{ range $_, $partition := . }} + {{ range $_, $partition := $.Resolver }} {{ template "partition region consts" $partition }} {{ end }} - {{ template "service consts" . }} + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} - {{ template "endpoint resolvers" . }} + {{ template "endpoint resolvers" $.Resolver }} {{- end }} {{ define "partition consts" }} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go index 5766361686..fa06f7a8f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -5,13 +5,9 @@ import "github.com/aws/aws-sdk-go/aws/awserr" var ( // ErrMissingRegion is an error that is returned if region configuration is // not found. - // - // @readonly ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) // ErrMissingEndpoint is an error that is returned if an endpoint cannot be // resolved for a service. 
- // - // @readonly ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) ) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index db87188e20..6ed15b2ecc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -26,14 +26,14 @@ func (l *LogLevelType) Value() LogLevelType { // Matches returns true if the v LogLevel is enabled by this LogLevel. Should be // used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nill, will default to LogOff comparison. +// LogLevel is nil, will default to LogOff comparison. func (l *LogLevelType) Matches(v LogLevelType) bool { c := l.Value() return c&v == v } // AtLeast returns true if this LogLevel is at least high enough to satisfy v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default // to LogOff comparison. func (l *LogLevelType) AtLeast(v LogLevelType) bool { c := l.Value() @@ -71,6 +71,12 @@ const ( // LogDebugWithRequestErrors states the SDK should log when service requests fail // to build, send, validate, or unmarshal. LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodies. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK. Will also enable LogDebug. + LogDebugWithEventStreamBody ) // A Logger is a minimalistic interface for the SDK to log messages to. Should diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 802ac88ad5..8ef8548a96 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -14,10 +14,12 @@ type Handlers struct { Send HandlerList ValidateResponse HandlerList Unmarshal HandlerList + UnmarshalStream HandlerList UnmarshalMeta HandlerList UnmarshalError HandlerList Retry HandlerList AfterRetry HandlerList + CompleteAttempt HandlerList Complete HandlerList } @@ -30,10 +32,12 @@ func (h *Handlers) Copy() Handlers { Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), UnmarshalError: h.UnmarshalError.copy(), UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), Complete: h.Complete.copy(), } } @@ -45,11 +49,13 @@ func (h *Handlers) Clear() { h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() + h.UnmarshalStream.Clear() h.UnmarshalMeta.Clear() h.UnmarshalError.Clear() h.ValidateResponse.Clear() h.Retry.Clear() h.AfterRetry.Clear() + h.CompleteAttempt.Clear() h.Complete.Clear() } @@ -172,6 +178,21 @@ func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { return swapped } +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be replaced with the handler passed in. True is returned if the handlers were swapped.
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + // SetBackNamed will replace the named handler if it exists in the handler list. // If the handler does not exist the handler will be added to the end of the list. func (l *HandlerList) SetBackNamed(n NamedHandler) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index 02f07f4a46..b0c2ef4fe6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -3,6 +3,8 @@ package request import ( "io" "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" ) // offsetReader is a thread-safe io.ReadCloser to prevent racing @@ -15,7 +17,7 @@ type offsetReader struct { func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { reader := &offsetReader{} - buf.Seek(offset, 0) + buf.Seek(offset, sdkio.SeekStart) reader.buf = buf return reader diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 299dc379d1..8f2eb3e43c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "net" "net/http" "net/url" "reflect" @@ -14,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -24,10 +24,14 @@ const ( // ErrCodeRead is an error that is returned during HTTP reads. ErrCodeRead = "ReadError" - // ErrCodeResponseTimeout is the connection timeout error that is recieved + // ErrCodeResponseTimeout is the connection timeout error that is received // during body reads. ErrCodeResponseTimeout = "ResponseTimeout" + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid. + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + // CanceledErrorCode is the error code that will be returned by an // API request that was canceled. Requests given an aws.Context may // return this error when canceled. @@ -41,8 +45,8 @@ type Request struct { Handlers Handlers Retryer + AttemptTime time.Time Time time.Time - ExpireTime time.Duration Operation *Operation HTTPRequest *http.Request HTTPResponse *http.Response @@ -60,6 +64,11 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // A value greater than 0 instructs the request to be signed as a Presigned URL. + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + context aws.Context built bool @@ -104,6 +113,8 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } + SanitizeHostForHeader(httpReq) + r := &Request{ Config: cfg, ClientInfo: clientInfo, @@ -214,6 +225,9 @@ func (r *Request) SetContext(ctx aws.Context) { // WillRetry returns if the request can be retried.
func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } @@ -245,39 +259,74 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader + r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset. r.ResetBody() } // Presign returns the request's signed URL. Error will be returned -// if the signing fails. -func (r *Request) Presign(expireTime time.Duration) (string, error) { - r.ExpireTime = expireTime +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. +// +// It is invalid to create a presigned URL with an expire duration of 0 or less. An +// error is returned if the expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this, making the presigned URL + // useless. r.NotHoist = false + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with an expire duration of 0 or less. An +// error is returned if the expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API URL. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + if r.Operation.BeforePresignFn != nil { - r = r.copy() - err := r.Operation.BeforePresignFn(r) - if err != nil { - return "", err + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err } } - r.Sign() - if r.Error != nil { - return "", r.Error + if err := r.Sign(); err != nil { + return "", nil, err } - return r.HTTPRequest.URL.String(), nil -} -// PresignRequest behaves just like presign, but hoists all headers and signs them.
-// Also returns the signed hash back to the user -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.NotHoist = true - r.Sign() - if r.Error != nil { - return "", nil, r.Error - } return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } @@ -297,7 +346,7 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) { // Build will build the request's object so it can be signed and sent // to the service. Build will also validate all the request's parameters. -// Anny additional build Handlers set on this request will be run +// Any additional build Handlers set on this request will be run // in the order they were set. // // The request will only be built once. Multiple calls to build will have @@ -323,9 +372,9 @@ func (r *Request) Build() error { return r.Error } -// Sign will sign the request returning error if errors are encountered. +// Sign will sign the request, returning an error if errors are encountered. // -// Send will build the request prior to signing. All Sign Handlers will +// Sign will build the request prior to signing. All Sign Handlers will // be executed in the order they were set. func (r *Request) Sign() error { r.Build() @@ -358,7 +407,7 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // of the SDK if they used that field. // // Related golang/go#18257 - l, err := computeBodyLength(r.Body) + l, err := aws.SeekerLen(r.Body) if err != nil { return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) } @@ -376,7 +425,8 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // Transfer-Encoding: chunked bodies for these methods. // // This would only happen if an aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker. + // an io.Reader that was not also an io.Seeker, or did not implement + // the Len() method. switch r.Operation.HTTPMethod { case "GET", "HEAD", "DELETE": body = NoBody @@ -388,49 +438,13 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { return body, nil } -// Attempts to compute the length of the body of the reader using the -// io.Seeker interface. If the value is not seekable because of being -// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned. -// If no error occurs the length of the body will be returned. -func computeBodyLength(r io.ReadSeeker) (int64, error) { - seekable := true - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. - switch v := r.(type) { - case aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - case *aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - } - if !seekable { - return -1, nil - } - - curOffset, err := r.Seek(0, 1) - if err != nil { - return 0, err - } - - endOffset, err := r.Seek(0, 2) - if err != nil { - return 0, err - } - - _, err = r.Seek(curOffset, 0) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - // GetBody will return an io.ReadSeeker of the Request's underlying // input body with a concurrency safe wrapper. func (r *Request) GetBody() io.ReadSeeker { return r.safeBody } -// Send will send the request returning error if errors are encountered. +// Send will send the request, returning an error if errors are encountered. // // Send will sign the request prior to sending. All Send Handlers will // be executed in the order they were set.
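A minimal sketch of how a caller might use the reworked presign API above; the bucket and key names are illustrative, not part of this change:

	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // illustrative name
		Key:    aws.String("example-key"),    // illustrative name
	})

	// A zero or negative expire now fails with ErrCodeInvalidPresignExpire.
	url, err := req.Presign(15 * time.Minute)

	// PresignRequest additionally returns the headers that were signed; these
	// must be sent with any HTTP request made against the presigned URL.
	urlStr, signedHeaders, presignErr := req.PresignRequest(15 * time.Minute)

Both methods operate on a copy of the request, so presigning no longer mutates the original Request value.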
@@ -450,79 +464,78 @@ func (r *Request) Send() error { r.Handlers.Complete.Run(r) }() - for { - if aws.BoolValue(r.Retryable) { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } + if err := r.Error; err != nil { + return err + } - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() + for { + r.Error = nil + r.AttemptTime = time.Now() - // Closing response body to ensure that no response body is leaked - // between retry attempts. - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", false, err) + return err } - r.Sign() - if r.Error != nil { - return r.Error - } - - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - if !shouldRetryCancel(r) { - return r.Error - } - - err := r.Error + if err := r.sendRequest(); err == nil { + return nil + } else if !shouldRetryCancel(r.Error) { + return err + } else { r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", false, err) - return r.Error - } - debugLogReqError(r, "Send Request", true, err) - continue - } - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Response", false, err) + if r.Error != nil || !aws.BoolValue(r.Retryable) { return r.Error } - debugLogReqError(r, "Validate Response", true, err) - continue - } - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, err) - return r.Error - } - debugLogReqError(r, "Unmarshal Response", true, err) + r.prepareRetry() continue } + } +} + +func (r *Request) prepareRetry() { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() - break + // Closing response body to ensure that no response body is leaked + // between retry attempts. 
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", r.WillRetry(), r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error) + return r.Error } return nil @@ -548,28 +561,113 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } -func shouldRetryCancel(r *Request) bool { - awsErr, ok := r.Error.(awserr.Error) - timeoutErr := false - errStr := r.Error.Error() - if ok { - if awsErr.Code() == CanceledErrorCode { +type temporary interface { + Temporary() bool +} + +func shouldRetryCancel(err error) bool { + switch err := err.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { return false } - err := awsErr.OrigErr() - netErr, netOK := err.(net.Error) - timeoutErr = netOK && netErr.Temporary() - if urlErr, ok := err.(*url.Error); !timeoutErr && ok { - errStr = urlErr.Err.Error() + return shouldRetryCancel(err.OrigErr()) + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the underlying error: + return shouldRetryCancel(err.Err) + case temporary: + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retriable. See + // TestRequest4xxUnretryable for an example. + return true + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known Go 1.5 error case when an HTTP request is canceled + return false } + // here we don't know the error, so we allow a retry. + return true + } +} + +// SanitizeHostForHeader removes the default port from the host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns the host from the request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host } - // There can be two types of canceled errors here. - // The first being a net.Error and the other being an error. - // If the request was timed out, we want to continue the retry - // process. Otherwise, return the canceled error. - return timeoutErr || - (errStr != "net/http: request canceled" && - errStr != "net/http: request canceled while waiting for connection") + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier.
+// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + return false } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go index 869b97a1a0..e36e468b7c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -21,7 +21,7 @@ func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } var NoBody = noBody{} // ResetBody rewinds the request body back to its starting position, and -// set's the HTTP Request body reference. When the body is read prior +// sets the HTTP Request body reference. When the body is read prior // to being sent in the HTTP request it will need to be rewound. // // ResetBody will automatically be called by the SDK's build handler, but if diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index c32fc69bc5..7c6a8000f6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -11,7 +11,7 @@ import ( var NoBody = http.NoBody // ResetBody rewinds the request body back to its starting position, and -// set's the HTTP Request body reference. When the body is read prior +// sets the HTTP Request body reference. When the body is read prior // to being sent in the HTTP request it will need to be rewound. // // ResetBody will automatically be called by the SDK's build handler, but if diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index 59de6736b6..a633ed5acf 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -35,8 +35,12 @@ type Pagination struct { // NewRequest should always be built from the same API operations. It is // undefined if different API operations are returned on subsequent calls. NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // tokens that are the same as its previous tokens.
+ EndPageOnSameToken bool started bool + prevTokens []interface{} nextTokens []interface{} err error @@ -49,7 +53,15 @@ type Pagination struct { // // Will always return true if Next has not been called yet. func (p *Pagination) HasNextPage() bool { - return !(p.started && len(p.nextTokens) == 0) + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage } // Err returns the error Pagination encountered when retrieving the next page. @@ -96,6 +108,7 @@ func (p *Pagination) Next() bool { return false } + p.prevTokens = p.nextTokens p.nextTokens = req.nextPageTokens() p.curPage = req.Data @@ -142,13 +155,28 @@ func (r *Request) nextPageTokens() []interface{} { tokens := []interface{}{} tokenAdded := false for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } } + + tokenAdded = true + tokens = append(tokens, v) } if !tokenAdded { return nil diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 8d369c1b8c..7bc5da7826 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -8,7 +8,7 @@ import ( ) // Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer +// The default implementation used by most services is the client.DefaultRetryer // structure, which contains basic retry logic using exponential backoff. type Retryer interface { RetryRules(*Request) time.Duration @@ -40,6 +40,7 @@ var throttleCodes = map[string]struct{}{ "RequestThrottled": {}, "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, } // credsExpiredCodes is a collection of error codes which signify the credentials @@ -70,8 +71,8 @@ func isCodeExpiredCreds(code string) bool { } var validParentCodes = map[string]struct{}{ - ErrCodeSerialization: struct{}{}, - ErrCodeRead: struct{}{}, + ErrCodeSerialization: {}, + ErrCodeRead: {}, } type temporaryError interface { @@ -97,7 +98,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { } if t, ok := err.(temporaryError); ok { - return t.Temporary() + return t.Temporary() || isErrConnectionReset(err) } return isErrConnectionReset(err) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go index 2520286b75..8630683f31 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -17,6 +17,12 @@ const ( ParamMinValueErrCode = "ParamMinValueError" // ParamMinLenErrCode is the error code for fields without enough elements. ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. 
+ ParamMaxLenErrCode = "ParamMaxLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" ) // Validator provides a way for types to perform validation logic on their @@ -220,7 +226,7 @@ type ErrParamMinLen struct { func NewErrParamMinLen(field string, min int) *ErrParamMinLen { return &ErrParamMinLen{ errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, + code: ParamMinLenErrCode, field: field, msg: fmt.Sprintf("minimum field size of %v", min), }, @@ -232,3 +238,49 @@ func NewErrParamMinLen(field string, min int) *ErrParamMinLen { func (e *ErrParamMinLen) MinLen() int { return e.min } + +// An ErrParamMaxLen represents a maximum length parameter error. +type ErrParamMaxLen struct { + errInvalidParam + max int +} + +// NewErrParamMaxLen creates a new maximum length parameter error. +func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { + return &ErrParamMaxLen{ + errInvalidParam: errInvalidParam{ + code: ParamMaxLenErrCode, + field: field, + msg: fmt.Sprintf("maximum size of %v, %v", max, value), + }, + max: max, + } +} + +// MaxLen returns the field's required maximum length. +func (e *ErrParamMaxLen) MaxLen() int { + return e.max +} + +// An ErrParamFormat represents an invalid format parameter error. +type ErrParamFormat struct { + errInvalidParam + format string +} + +// NewErrParamFormat creates a new invalid format parameter error. +func NewErrParamFormat(field string, format, value string) *ErrParamFormat { + return &ErrParamFormat{ + errInvalidParam: errInvalidParam{ + code: ParamFormatErrCode, + field: field, + msg: fmt.Sprintf("format %v, %v", format, value), + }, + format: format, + } +} + +// Format returns the field's required format. +func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go index 22d2f80980..4601f883cc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -79,8 +79,9 @@ type Waiter struct { MaxAttempts int Delay WaiterDelay - RequestOptions []Option - NewRequest func([]Option) (*Request, error) + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error } // ApplyOptions updates the waiter with the list of waiter options provided.
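A hedged sketch of how hand-written validation code might construct the new ErrParamMaxLen and ErrParamFormat values added above; the field name, limit, and pattern are illustrative:

	import (
		"regexp"

		"github.com/aws/aws-sdk-go/aws/request"
	)

	func validateRoleName(name string) error {
		if len(name) > 64 {
			// Reports the maximum allowed size along with the offending value.
			return request.NewErrParamMaxLen("RoleName", 64, name)
		}
		if ok, _ := regexp.MatchString(`^[\w+=,.@-]+$`, name); !ok {
			// Reports the required format along with the offending value.
			return request.NewErrParamFormat("RoleName", `[\w+=,.@-]+`, name)
		}
		return nil
	}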
@@ -195,8 +196,15 @@ func (w Waiter) WaitWithContext(ctx aws.Context) error { if sleepFn := req.Config.SleepDelay; sleepFn != nil { // Support SleepDelay for backwards compatibility and testing sleepFn(delay) - } else if err := aws.SleepWithContext(ctx, delay); err != nil { - return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index ea7b886f81..38a7b05a62 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -99,7 +99,7 @@ handler logs every request and its payload made by a service client: sess.Handlers.Send.PushFront(func(r *request.Request) { // Log every request made and its payload - logger.Println("Request: %s/%s, Payload: %s", + logger.Printf("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) }) @@ -128,7 +128,7 @@ read. The Session will be created from configuration values from the shared credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). Credentials are the values the SDK should use for authenticating requests with -AWS Services. They arfrom a configuration file will need to include both +AWS Services. Credentials loaded from a configuration file must include both aws_access_key_id and aws_secret_access_key, provided together in the same file, to be considered valid. The values will be ignored if not a complete group. aws_session_token is an optional field that can be provided if both of @@ -183,7 +183,7 @@ be returned when creating the session. // from assumed role. svc := s3.New(sess) -To setup assume role outside of a session see the stscrds.AssumeRoleProvider +To set up assume role outside of a session see the stscreds.AssumeRoleProvider documentation. Environment Variables diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 7357e545ac..e3959b959e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -4,9 +4,14 @@ import ( "os" "strconv" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" ) +// EnvProviderName provides the name of the provider when config is loaded from the environment. +const EnvProviderName = "EnvConfigCredentials" + // envConfig is a collection of environment values the SDK will read // setup config from. All environment values are optional. But some values // such as credentials require multiple values to be complete or the values @@ -75,8 +80,8 @@ type envConfig struct { // AWS_CONFIG_FILE=$HOME/my_shared_config SharedConfigFile string - // Sets the path to a custom Credentials Authroity (CA) Bundle PEM file - // that the SDK will use instead of the the system's root CA bundle. + // Sets the path to a custom Certificate Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. // Only use this if you want to configure the SDK to use a custom set // of CAs.
// @@ -92,9 +97,29 @@ type envConfig struct { // // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle CustomCABundle string + + csmEnabled string + CSMEnabled bool + CSMPort string + CSMClientID string + + enableEndpointDiscovery string + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool } var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } credAccessEnvKey = []string{ "AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY", @@ -107,6 +132,10 @@ var ( "AWS_SESSION_TOKEN", } + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + regionEnvKeys = []string{ "AWS_REGION", "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set @@ -153,11 +182,17 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + // Require logical grouping of credentials if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { cfg.Creds = credentials.Value{} } else { - cfg.Creds.ProviderName = "EnvConfigCredentials" + cfg.Creds.ProviderName = EnvProviderName } regionKeys := regionEnvKeys @@ -170,9 +205,22 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Region, regionKeys) setFromEnvVal(&cfg.Profile, profileKeys) + // Endpoint discovery is enabled when the env var is set to any value other than "false". + setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") return cfg diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 9f75d5ac58..9bdbafd65c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -14,19 +14,39 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" ) +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified,
not both", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + // A Session provides a central location to create service clients from and // store configurations and request handlers for those services. // // Sessions are safe to create service clients concurrently, but it is not safe // to mutate the Session concurrently. // -// The Session satisfies the service client's client.ClientConfigProvider. +// The Session satisfies the service client's client.ConfigProvider. type Session struct { Config *aws.Config Handlers request.Handlers @@ -58,7 +78,12 @@ func New(cfgs ...*aws.Config) *Session { envCfg := loadEnvConfig() if envCfg.EnableSharedConfig { - s, err := newSession(Options{}, envCfg, cfgs...) + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) if err != nil { // Old session.New expected all errors to be discovered when // a request is made, and would report the errors then. This @@ -76,10 +101,16 @@ func New(cfgs ...*aws.Config) *Session { r.Error = err }) } + return s } - return deprecatedNewSession(cfgs...) + s := deprecatedNewSession(cfgs...) + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } + + return s } // NewSession returns a new Session created from SDK defaults, config files, @@ -243,13 +274,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) { envCfg.EnableSharedConfig = true } - if len(envCfg.SharedCredentialsFile) == 0 { - envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() - } - if len(envCfg.SharedConfigFile) == 0 { - envCfg.SharedConfigFile = defaults.SharedConfigFilename() - } - // Only use AWS_CA_BUNDLE if session option is not provided. 
if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { f, err := os.Open(envCfg.CustomCABundle) @@ -302,10 +326,22 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { } initHandlers(s) - return s } +func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { + logger.Log("Enabling CSM") + if len(port) == 0 { + port = csm.DefaultPort + } + + r, err := csm.Start(clientID, "127.0.0.1:"+port) + if err != nil { + return + } + r.InjectHandlers(handlers) +} + func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() handlers := defaults.Handlers() @@ -345,6 +381,9 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } // Setup HTTP client with custom cert bundle if enabled if opts.CustomCABundle != nil { @@ -414,8 +453,67 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share } } + if cfg.EnableEndpointDiscovery == nil { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) + } + } + // Configure credentials if not already set if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + + // inspect the profile to see if a credential source has been specified. + if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { + + // if both credential_source and source_profile have been set, return an error + // as this is undefined behavior. + if len(sharedCfg.AssumeRole.SourceProfile) > 0 { + return ErrSharedConfigSourceCollision + } + + // valid credential source values + const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" + ) + + switch sharedCfg.AssumeRole.CredentialSource { + case credSourceEc2Metadata: + cfgCp := *cfg + p := defaults.RemoteCredProvider(cfgCp, handlers) + cfgCp.Credentials = credentials.NewCredentials(p) + + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return AssumeRoleTokenProviderNotSetError{} + } + + cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) + case credSourceEnvironment: + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return ErrSharedConfigECSContainerEnvVarEmpty + } + + cfgCp := *cfg + p := defaults.RemoteCredProvider(cfgCp, handlers) + creds := credentials.NewCredentials(p) + + cfg.Credentials = creds + default: + return ErrSharedConfigInvalidCredSource + } + + return nil + } + if len(envCfg.Creds.AccessKeyID) > 0 { cfg.Credentials = credentials.NewStaticCredentialsFromCreds( envCfg.Creds, @@ -425,36 +523,22 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( sharedCfg.AssumeRoleSource.Creds, ) + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { // AssumeRole Token provider is required if doing Assume Role // with MFA. 
return AssumeRoleTokenProviderNotSetError{} } - cfg.Credentials = stscreds.NewCredentials( - &Session{ - Config: &cfgCp, - Handlers: handlers.Copy(), - }, - sharedCfg.AssumeRole.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName - - // Assume role with external ID - if len(sharedCfg.AssumeRole.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.AssumeRole.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ) + + cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) } else if len(sharedCfg.Creds.AccessKeyID) > 0 { cfg.Credentials = credentials.NewStaticCredentialsFromCreds( sharedCfg.Creds, ) + } else if len(sharedCfg.CredentialProcess) > 0 { + cfg.Credentials = processcreds.NewCredentials( + sharedCfg.CredentialProcess, + ) } else { // Fallback to default credentials provider, include mock errors // for the credential chain so user can identify why credentials @@ -473,6 +557,30 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share return nil } +func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials { + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + + // Assume role with external ID + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.AssumeRole.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ) +} + // AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the // MFAToken option is not set when shared config is configured to assume a // role with an MFA token.
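A sketch of the shared config shape the new credential_source path is meant to handle, using hypothetical profile and role names; source_profile and credential_source remain mutually exclusive:

	// ~/.aws/config (hypothetical profile):
	//
	//   [profile from-instance]
	//   role_arn = arn:aws:iam::123456789012:role/example-role
	//   credential_source = Ec2InstanceMetadata

	sess := session.Must(session.NewSessionWithOptions(session.Options{
		Profile:           "from-instance",
		SharedConfigState: session.SharedConfigEnable,
	}))

With credential_source set to Ec2InstanceMetadata, the base credentials for the assume-role call come from the instance metadata provider rather than another named profile.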
@@ -573,11 +681,12 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, }, err } @@ -597,10 +706,11 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 09c8e5bc7a..7cb44021b3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -2,11 +2,11 @@ package session import ( "fmt" - "io/ioutil" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/internal/ini" ) const ( @@ -16,15 +16,21 @@ const ( sessionTokenKey = `aws_session_token` // optional // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional // Additional Config fields regionKey = `region` + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + // External Credential Process + credentialProcessKey = `credential_process` + // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name // is not provided. @@ -32,11 +38,12 @@ const ( ) type assumeRoleConfig struct { - RoleARN string - SourceProfile string - ExternalID string - MFASerial string - RoleSessionName string + RoleARN string + SourceProfile string + CredentialSource string + ExternalID string + MFASerial string + RoleSessionName string } // sharedConfig represents the configuration fields of the SDK config files. @@ -55,16 +62,25 @@ type sharedConfig struct { AssumeRole assumeRoleConfig AssumeRoleSource *sharedConfig + // An external process to request credentials + CredentialProcess string + // Region is the region the SDK should use for looking up AWS service endpoints // and signing requests. 
// // region Region string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool } type sharedConfigFile struct { Filename string - IniData *ini.File + IniData ini.Sections } // loadSharedConfig retrieves the configuration from the list of files @@ -105,19 +121,16 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { files := make([]sharedConfigFile, 0, len(filenames)) for _, filename := range filenames { - b, err := ioutil.ReadFile(filename) - if err != nil { + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { // Skip files which can't be opened and read for whatever reason continue - } - - f, err := ini.Load(b) - if err != nil { + } else if err != nil { return nil, SharedConfigLoadError{Filename: filename, Err: err} } files = append(files, sharedConfigFile{ - Filename: filename, IniData: f, + Filename: filename, IniData: sections, }) } @@ -127,6 +140,13 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { var assumeRoleSrc sharedConfig + if len(cfg.AssumeRole.CredentialSource) > 0 { + // setAssumeRoleSource is only called when source_profile is found. + // If both source_profile and credential_source are set, then + // ErrSharedConfigSourceCollision will be returned. + return ErrSharedConfigSourceCollision + } + // Multiple level assume role chains are not supported if cfg.AssumeRole.SourceProfile == origProfile { assumeRoleSrc = *cfg @@ -171,45 +191,59 @@ func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFil // if a config file only includes aws_access_key_id but no aws_secret_access_key // the aws_access_key_id will be ignored.
func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { - section, err := file.IniData.GetSection(profile) - if err != nil { + section, ok := file.IniData.GetSection(profile) + if !ok { // Fall back to the alternate profile name: profile - section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) - if err != nil { - return SharedConfigProfileNotExistsError{Profile: profile, Err: err} + section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if !ok { + return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} } } // Shared Credentials - akid := section.Key(accessKeyIDKey).String() - secret := section.Key(secretAccessKey).String() + akid := section.String(accessKeyIDKey) + secret := section.String(secretAccessKey) if len(akid) > 0 && len(secret) > 0 { cfg.Creds = credentials.Value{ AccessKeyID: akid, SecretAccessKey: secret, - SessionToken: section.Key(sessionTokenKey).String(), + SessionToken: section.String(sessionTokenKey), ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), } } // Assume Role - roleArn := section.Key(roleArnKey).String() - srcProfile := section.Key(sourceProfileKey).String() - if len(roleArn) > 0 && len(srcProfile) > 0 { + roleArn := section.String(roleArnKey) + srcProfile := section.String(sourceProfileKey) + credentialSource := section.String(credentialSourceKey) + hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 + if len(roleArn) > 0 && hasSource { cfg.AssumeRole = assumeRoleConfig{ - RoleARN: roleArn, - SourceProfile: srcProfile, - ExternalID: section.Key(externalIDKey).String(), - MFASerial: section.Key(mfaSerialKey).String(), - RoleSessionName: section.Key(roleSessionNameKey).String(), + RoleARN: roleArn, + SourceProfile: srcProfile, + CredentialSource: credentialSource, + ExternalID: section.String(externalIDKey), + MFASerial: section.String(mfaSerialKey), + RoleSessionName: section.String(roleSessionNameKey), } } + // `credential_process` + if credProc := section.String(credentialProcessKey); len(credProc) > 0 { + cfg.CredentialProcess = credProc + } + // Region - if v := section.Key(regionKey).String(); len(v) > 0 { + if v := section.String(regionKey); len(v) > 0 { cfg.Region = v } + // Endpoint discovery + if section.Has(enableEndpointDiscoveryKey) { + v := section.Bool(enableEndpointDiscoveryKey) + cfg.EnableEndpointDiscovery = &v + } + return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 434ac872de..523db79f8d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -45,7 +45,7 @@ // If signing a request intended for HTTP2 server, and you're using Go 1.6.2 // through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the // request URL. https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that failes to make HTTP2 requests using absolute URL in the HTTP +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP // message. URL.Opaque generally will force Go to make requests with absolute URL. // URL.RawPath does not do this, but RawPath must be a valid escaping of Path // or url.EscapedPath will ignore the RawPath escaping.
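A minimal sketch of using the v4 signer directly on a plain *http.Request, per the package doc above; the credentials, service, and region are placeholders:

	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))

	req, _ := http.NewRequest("GET", "https://sts.us-west-2.amazonaws.com/", nil)

	// Sign with a nil body; the headers that were signed are also returned.
	_, err := signer.Sign(req, nil, "sts", "us-west-2", time.Now())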
@@ -55,7 +55,6 package v4 import ( - "bytes" "crypto/hmac" "crypto/sha256" "encoding/hex" @@ -72,6 +71,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" "github.com/aws/aws-sdk-go/private/protocol/rest" ) @@ -98,25 +98,25 @@ var ignoredHeaders = rules{ var requiredSignedHeaders = rules{ whitelist{ mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, @@ -134,7 +134,9 @@ var requiredSignedHeaders = rules{ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, }, }, patterns{"X-Amz-Meta-"}, @@ -180,7 +182,7 @@ type Signer struct { // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html DisableURIPathEscaping bool - // Disales the automatical setting of the HTTP request's Body field with the + // Disables the automatic setting of the HTTP request's Body field with the // io.ReadSeeker passed in to the signer. This is useful if you're using a // custom wrapper around the body for the io.ReadSeeker and want to preserve // the Body value on the Request.Body. @@ -269,7 +271,7 @@ type signingCtx struct { // "X-Amz-Content-Sha256" header with a precomputed value. The signer will // only compute the hash if the request header value is empty.
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, signTime) + return v4.signWithBody(r, body, service, region, 0, false, signTime) } // Presign signs AWS v4 requests with the provided body, service name, region @@ -303,10 +305,10 @@ func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region strin // presigned request's signature you can set the "X-Amz-Content-Sha256" // HTTP header and that will be included in the request's signature. func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, signTime) + return v4.signWithBody(r, body, service, region, exp, true, signTime) } -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { currentTimeFn := v4.currentTimeFn if currentTimeFn == nil { currentTimeFn = time.Now @@ -318,7 +320,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi Query: r.URL.Query(), Time: signTime, ExpireTime: exp, - isPresign: exp != 0, + isPresign: isPresign, ServiceName: service, Region: region, DisableURIPathEscaping: v4.DisableURIPathEscaping, @@ -340,8 +342,11 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi return http.Header{}, err } + ctx.sanitizeHostForHeader() ctx.assignAmzQueryValues() - ctx.build(v4.DisableHeaderHoisting) + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } // If the request is not presigned the body should be attached to it. This // prevents the confusion of wanting to send a signed request without @@ -364,6 +369,10 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi return ctx.SignedHeaderVals, nil } +func (ctx *signingCtx) sanitizeHostForHeader() { + request.SanitizeHostForHeader(ctx.Request) +} + func (ctx *signingCtx) handlePresignRemoval() { if !ctx.isPresign { return @@ -402,7 +411,7 @@ var SignRequestHandler = request.NamedHandler{ } // SignSDKRequest signs an AWS request with the V4 signature. This -// request handler is bested used only with the SDK's built in service client's +// request handler should only be used with the SDK's built in service client's // API operation requests. // // This function should not be used on its on its own, but in conjunction with @@ -413,7 +422,7 @@ var SignRequestHandler = request.NamedHandler{ // If the credentials of the request's config are set to // credentials.AnonymousCredentials the request will not be signed. func SignSDKRequest(req *request.Request) { - signSDKRequestWithCurrTime(req, time.Now) + SignSDKRequestWithCurrentTime(req, time.Now) } // BuildNamedHandler will build a generic handler for signing. @@ -421,12 +430,15 @@ func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler return request.NamedHandler{ Name: name, Fn: func(req *request.Request) { - signSDKRequestWithCurrTime(req, time.Now, opts...) + SignSDKRequestWithCurrentTime(req, time.Now, opts...) 
}, } } -func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { +// SignSDKRequestWithCurrentTime will sign the SDK's request using the time +// function passed in. Behaves the same as SignSDKRequest with the exception +// the request is signed with the value returned by the current time function. +func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { // If the request does not need to be signed ignore the signing of the // request if the AnonymousCredentials object is used. if req.Config.Credentials == credentials.AnonymousCredentials { @@ -462,13 +474,9 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time opt(v4) } - signingTime := req.Time - if !req.LastSignedAt.IsZero() { - signingTime = req.LastSignedAt - } - + curTime := curTimeFn() signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, signingTime, + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, ) if err != nil { req.Error = err @@ -477,7 +485,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time } req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTimeFn() + req.LastSignedAt = curTime } const logSignInfoMsg = `DEBUG: Request Signature: @@ -499,10 +507,14 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) { v4.Logger.Log(msg) } -func (ctx *signingCtx) build(disableHeaderHoisting bool) { +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { ctx.buildTime() // no depends ctx.buildCredentialString() // no depends + if err := ctx.buildBodyDigest(); err != nil { + return err + } + unsignedHeaders := ctx.Request.Header if ctx.isPresign { if !disableHeaderHoisting { @@ -514,7 +526,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } } - ctx.buildBodyDigest() ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) ctx.buildCanonicalString() // depends on canon headers / signed headers ctx.buildStringToSign() // depends on canon string @@ -530,6 +541,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) } + + return nil } func (ctx *signingCtx) buildTime() { @@ -604,14 +617,18 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { headerValues := make([]string, len(headers)) for i, k := range headers { if k == "host" { - headerValues[i] = "host:" + ctx.Request.URL.Host + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } } else { headerValues[i] = k + ":" + strings.Join(ctx.SignedHeaderVals[k], ",") } } - - ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") } func (ctx *signingCtx) buildCanonicalString() { @@ -652,21 +669,34 @@ func (ctx *signingCtx) buildSignature() { ctx.signature = hex.EncodeToString(signature) } -func (ctx *signingCtx) buildBodyDigest() { +func (ctx *signingCtx) buildBodyDigest() error { hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") if hash == "" { - if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { hash = 
"UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign } else if ctx.Body == nil { hash = emptyStringSHA256 } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) } - if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + + if includeSHA256Header { ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) } } ctx.bodyDigest = hash + + return nil } // isRequestSigned returns if the request is currently signed or presigned @@ -706,56 +736,61 @@ func makeSha256(data []byte) []byte { func makeSha256Reader(reader io.ReadSeeker) []byte { hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) + start, _ := reader.Seek(0, sdkio.SeekCurrent) + defer reader.Seek(start, sdkio.SeekStart) + + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } - io.Copy(hash, reader) return hash.Sum(nil) } -const doubleSpaces = " " - -var doubleSpaceBytes = []byte(doubleSpaces) +const doubleSpace = " " -func stripExcessSpaces(headerVals []string) []string { - vals := make([]string, len(headerVals)) - for i, str := range headerVals { - // Trim leading and trailing spaces - trimmed := strings.TrimSpace(str) +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } - idx := strings.Index(trimmed, doubleSpaces) - var buf []byte - for idx > -1 { - // Multiple adjacent spaces found - if buf == nil { - // first time create the buffer - buf = []byte(trimmed) - } + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] - stripToIdx := -1 - for j := idx + 1; j < len(buf); j++ { - if buf[j] != ' ' { - buf = append(buf[:idx+1], buf[j:]...) - stripToIdx = j - break - } - } + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } - if stripToIdx >= 0 { - idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) - if idx >= 0 { - idx += stripToIdx + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ } + spaces++ } else { - idx = -1 + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ } } - if buf != nil { - vals[i] = string(buf) - } else { - vals[i] = trimmed - } + vals[i] = string(buf[:m]) } - return vals } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 0e2d864e10..8b6f23425a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -3,6 +3,8 @@ package aws import ( "io" "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" ) // ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should @@ -22,6 +24,22 @@ type ReaderSeekerCloser struct { r io.Reader } +// IsReaderSeekable returns if the underlying reader type can be seeked. A +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. 
+func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned. // @@ -56,6 +74,71 @@ func (r ReaderSeekerCloser) IsSeeker() bool { return ok } +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. +func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + // Close closes the ReaderSeekerCloser. // // If the ReaderSeekerCloser is not an io.Closer nothing will be done. 
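SeekerLen above answers "how many bytes remain" without disturbing the caller's position: it seeks to the end, measures, and seeks back to the original offset. A minimal sketch:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	r := strings.NewReader("0123456789")
	r.Seek(4, io.SeekStart) // consume the first four bytes

	n, err := aws.SeekerLen(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 6: bytes remaining after the current offset

	cur, _ := r.Seek(0, io.SeekCurrent)
	fmt.Println(cur) // 4: the offset was restored
}
```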
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index bbf4c2f125..ab6cee169a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.8.34" +const SDKVersion = "1.16.36" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 0000000000..e83a99886b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
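To make the ast.go conventions concrete (the root is Children[0] unless RootToken is set), a small sketch built only from the exported fields and methods; the kinds chosen here are for illustration, and as before the internal package is only importable from within the SDK module:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	// An expression node carries its token in Root (RootToken: true),
	// so GetChildren returns Children unchanged.
	expr := ini.AST{Kind: ini.ASTKindExpr, RootToken: true}

	// A statement wraps the expression; its "root" is Children[0].
	stmt := ini.AST{Kind: ini.ASTKindStatement, Children: []ini.AST{expr}}

	fmt.Println(stmt.GetRoot().Kind)     // expr
	fmt.Println(len(stmt.GetChildren())) // 0: the root child is excluded
}
```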
+var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 0000000000..0895d53cbe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 0000000000..0b76999ba1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 0000000000..25ce0fe134 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 0000000000..04345a54c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 0000000000..91ba2a59dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. 
+// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 0000000000..8d462f77e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 0000000000..3b0ca7afe3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 0000000000..582c024ad1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. + ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. 
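Tying OpenFile, Parse, and the grammar in doc.go together, a minimal sketch parsing an in-memory profile with ParseBytes (section and key names are made up):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	data := []byte("[dev]\nregion = us-west-2\nmax_retries = 3\n")

	sections, err := ini.ParseBytes(data)
	if err != nil {
		panic(err)
	}

	section, ok := sections.GetSection("dev")
	if !ok {
		panic("section not found")
	}

	fmt.Println(section.String("region"))    // us-west-2
	fmt.Println(section.Int("max_retries")) // 3
}
```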
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. +type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 0000000000..f99703372c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,347 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. 
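The lexer itself is unexported, but its token stream is observable through the AST kinds that ParseAST produces. A hedged sketch; the expected kinds are an inference from the parse table that follows, not a documented guarantee:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	src := []byte("[default]\nregion = us-west-2\n")

	tree, err := ini.ParseASTBytes(src)
	if err != nil {
		panic(err)
	}

	// Expect roughly two top-level nodes: completed_stmt for the
	// section header and expr_stmt for the assignment.
	for _, node := range tree {
		fmt.Println(node.Kind)
	}
}
```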
+var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. 
+ // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assiging a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) + + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) 
+ stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok)) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container)) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. +func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 0000000000..24df543d38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. 
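isNumberValue below is what decides whether a literal lexes as a number (binary, octal, decimal, hex, or scientific notation) or falls back to a string. A sketch probing the resulting value types through the public Section API (key names invented):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	data := []byte("[types]\nn = 42\nf = 1e-4\nh = 0x1F\nword = 42abc\n")

	sections, err := ini.ParseBytes(data)
	if err != nil {
		panic(err)
	}
	sec, _ := sections.GetSection("types")

	for _, k := range []string{"n", "f", "h", "word"} {
		t, _ := sec.ValueType(k)
		fmt.Println(k, t) // n INT, f FLOAT, h INT, word STRING
	}

	fmt.Println(sec.Int("h")) // 31: hex literals carry their base
}
```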
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 0000000000..e52ac399f1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 0000000000..a45c0bc566 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type 
numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 0000000000..8a84c7cbe0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go new file mode 100644 index 0000000000..4572870193 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError return nothing since there will never be any +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 0000000000..7f01cf7c70 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. 
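ParseError above intentionally mirrors the awserr.Error shape with Code, Message, and OrigError. A small sketch of catching it on malformed input (an unterminated section header is assumed to be rejected; the exact message text is unspecified):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	// Missing the closing ']' on the section header.
	_, err := ini.ParseBytes([]byte("[default\nregion = x\n"))

	if perr, ok := err.(*ini.ParseError); ok {
		fmt.Println(perr.Code(), "-", perr.Message())
	} else if err != nil {
		fmt.Println("other error:", err)
	}
}
```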
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 0000000000..f82095ba25 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 0000000000..6bb6964475 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + + s.Continue() + return false + } + s.prevTok = tok + + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true + s.prevTok = emptyToken +} + +func (s *skipper) Continue() { + s.shouldSkip = false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 0000000000..18f3fe8931 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. +func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. 
+// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 0000000000..305999d29b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical 
character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. +func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. +func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go new file mode 100644 index 0000000000..94841c3244 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -0,0 +1,166 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor return a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions... 
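The escape machinery above only applies inside quoted values: StringValue strips the quotes and decodes the escapes, while unquoted values keep backslash sequences verbatim. A sketch with made-up keys:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	data := []byte("[demo]\nmsg = \"a\\tb\\n\"\nplain = a\\tb\n")

	sections, err := ini.ParseBytes(data)
	if err != nil {
		panic(err)
	}
	sec, _ := sections.GetSection("demo")

	// Quoted: the quotes are removed and \t and \n are decoded.
	fmt.Printf("%q\n", sec.String("msg")) // "a\tb\n"

	// Unquoted: backslash sequences survive verbatim.
	fmt.Printf("%q\n", sec.String("plain")) // "a\\tb"
}
```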
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+	t := v.Sections.container[v.scope]
+	if t.values == nil {
+		t.values = values{}
+	}
+
+	switch expr.Kind {
+	case ASTKindExprStatement:
+		opExpr := expr.GetRoot()
+		switch opExpr.Kind {
+		case ASTKindEqualExpr:
+			children := opExpr.GetChildren()
+			if len(children) <= 1 {
+				return NewParseError("unexpected token type")
+			}
+
+			rhs := children[1]
+
+			if rhs.Root.Type() != TokenLit {
+				return NewParseError("unexpected token type")
+			}
+
+			key := EqualExprKey(opExpr)
+			v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+			if err != nil {
+				return err
+			}
+
+			t.values[key] = v
+		default:
+			return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+		}
+	default:
+		return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+	}
+
+	v.Sections.container[v.scope] = t
+	return nil
+}
+
+// VisitStatement visits statements and opens a new section scope when a
+// completed section statement is seen.
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+	switch stmt.Kind {
+	case ASTKindCompletedSectionStatement:
+		child := stmt.GetRoot()
+		if child.Kind != ASTKindSectionStatement {
+			return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+		}
+
+		name := string(child.Root.Raw())
+		v.Sections.container[name] = Section{}
+		v.scope = name
+	default:
+		return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+	}
+
+	return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+	container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second return value.
+func (t Sections) GetSection(p string) (Section, bool) {
+	v, ok := t.container[p]
+	return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+	keys := make([]string, len(t.container))
+	i := 0
+	for k := range t.container {
+		keys[i] = k
+		i++
+	}
+
+	sort.Strings(keys)
+	return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+	Name   string
+	values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+	_, ok := t.values[k]
+	return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+	v, ok := t.values[k]
+	return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+	return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+	return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+	return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+	_, ok := t.values[k]
+	if !ok {
+		return ""
+	}
+	return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
new file mode 100644
index 0000000000..99915f7f77
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using v, the Visitor.
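The visitor only becomes useful together with the walker defined next: parse the tokens to an AST, walk it with a DefaultVisitor, then query the populated Sections. A sketch of that flow, assuming ParseAST from ini_parser.go as the parser entry point and illustrative profile/key names:

package ini

import "fmt"

// loadRegion is illustrative only, not part of the vendored package.
func loadRegion(tokens []Token) (string, error) {
	tree, err := ParseAST(tokens)
	if err != nil {
		return "", err
	}

	v := NewDefaultVisitor()
	if err := Walk(tree, v); err != nil {
		return "", err
	}

	sec, ok := v.Sections.GetSection("default")
	if !ok {
		return "", fmt.Errorf("no default profile")
	}
	return sec.String("region"), nil
}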
+func Walk(tree []AST, v Visitor) error {
+	for _, node := range tree {
+		switch node.Kind {
+		case ASTKindExpr,
+			ASTKindExprStatement:
+
+			if err := v.VisitExpr(node); err != nil {
+				return err
+			}
+		case ASTKindStatement,
+			ASTKindCompletedSectionStatement,
+			ASTKindNestedSectionStatement,
+			ASTKindCompletedNestedSectionStatement:
+
+			if err := v.VisitStatement(node); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
new file mode 100644
index 0000000000..7ffb4ae06f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+	"unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace here is any Unicode space character except newline ('\n')
+// and carriage return ('\r').
+func isWhitespace(c rune) bool {
+	return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+	i := 0
+	for ; i < len(b); i++ {
+		if !isWhitespace(b[i]) {
+			break
+		}
+	}
+
+	return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
new file mode 100644
index 0000000000..0b9b0dfce0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+	return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+	return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+	return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 request ID 2 (host ID) from the
+// response.
+func RequestFailureWrapperHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: "awssdk.s3.errorHandler",
+		Fn: func(req *request.Request) {
+			reqErr, ok := req.Error.(awserr.RequestFailure)
+			if !ok || reqErr == nil {
+				return
+			}
+
+			hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+			if req.Error == nil {
+				return
+			}
+
+			req.Error = NewRequestFailure(reqErr, hostID)
+		},
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 0000000000..5aa9137e0f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,10 @@
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 0000000000..e5f005613b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 0000000000..0c9802d877 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 0000000000..38ea61afea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) + if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 0000000000..7da8a49ce5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. 
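The only behavioral difference between sdkuri.PathJoin and the standard library's path.Join is trailing-slash preservation, which matters for EC2 metadata URIs. A quick illustration (this is an internal package, importable only from within the SDK itself; the path segments are illustrative):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/sdkuri"
)

func main() {
	// path.Join would drop the trailing slash; PathJoin keeps it.
	fmt.Println(sdkuri.PathJoin("latest", "meta-data", "iam/")) // latest/meta-data/iam/
	fmt.Println(sdkuri.PathJoin("latest", "meta-data", "iam"))  // latest/meta-data/iam
}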
+var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go new file mode 100644 index 0000000000..ecc7bf82fa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + (*hs) = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err + case timestampValueType: + v, err := 
val.(json.Number).Int64() + return TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go new file mode 100644 index 0000000000..4b972b2d66 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -0,0 +1,199 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Decoder provides decoding of an Event Stream messages. +type Decoder struct { + r io.Reader + logger aws.Logger +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the stream. +func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { + reader := d.r + if d.logger != nil { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.logger, debugMsgBuf, m, err) + }() + } + + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +// UseLogger specifies the Logger that that the decoder should use to log the +// message decode to. 
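Decode reads exactly one framed message per call, which allows callers to hand the previous payload's backing array back in for reuse. A consumption sketch, assuming raw frames arrive on an io.Reader such as an HTTP response body:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

// drain reads frames until EOF, reusing the payload buffer across calls.
func drain(raw io.Reader) error {
	dec := eventstream.NewDecoder(raw)

	var buf []byte
	for {
		msg, err := dec.Decode(buf)
		if err == io.EOF {
			return nil // clean end of stream
		}
		if err != nil {
			return err
		}

		fmt.Printf("message: %d headers, %d payload bytes\n",
			len(msg.Headers), len(msg.Payload))

		// Hand the payload's backing array to the next Decode call.
		buf = msg.Payload[:0]
	}
}

func main() {
	if err := drain(os.Stdin); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}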
+func (d *Decoder) UseLogger(logger aws.Logger) { + d.logger = logger +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 0000000000..150a60981d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,114 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash" + "hash/crc32" + "io" +) + +// Encoder provides EventStream message encoding. +type Encoder struct { + w io.Writer + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. 
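All prelude and CRC bookkeeping happens inside Encode, so producing a valid frame only requires filling in a Message. A round-trip sketch against the Decoder from decode.go above (the header name and payload are illustrative):

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	var buf bytes.Buffer

	msg := eventstream.Message{
		Headers: eventstream.Headers{
			{Name: ":message-type", Value: eventstream.StringValue("event")},
		},
		Payload: []byte(`{"hello":"world"}`),
	}

	// Encode writes prelude, headers, payload, and both CRCs.
	if err := eventstream.NewEncoder(&buf).Encode(msg); err != nil {
		panic(err)
	}

	out, err := eventstream.NewDecoder(&buf).Decode(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%v %s\n", out.Headers.Get(":message-type"), out.Payload)
}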
+func (e *Encoder) Encode(msg Message) error { + e.headersBuf.Reset() + + err := encodeHeaders(e.headersBuf, msg.Headers) + if err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(e.w, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err := io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err := hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(e.w, binary.BigEndian, msgCRC) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +func encodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 0000000000..5481ef3079 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. +type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go new file mode 100644 index 0000000000..97937c8e59 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go @@ -0,0 +1,196 @@ +package eventstreamapi + +import ( + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Unmarshaler provides the interface for unmarshaling a EventStream +// message into a SDK type. +type Unmarshaler interface { + UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error +} + +// EventStream headers with specific meaning to async API functionality. +const ( + MessageTypeHeader = `:message-type` // Identifies type of message. 
+	EventMessageType     = `event`
+	ErrorMessageType     = `error`
+	ExceptionMessageType = `exception`
+
+	// Message Events
+	EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
+
+	// Message Error
+	ErrorCodeHeader    = `:error-code`
+	ErrorMessageHeader = `:error-message`
+
+	// Message Exception
+	ExceptionTypeHeader = `:exception-type`
+)
+
+// EventReader provides reading from the EventStream of a reader.
+type EventReader struct {
+	reader  io.ReadCloser
+	decoder *eventstream.Decoder
+
+	unmarshalerForEventType func(string) (Unmarshaler, error)
+	payloadUnmarshaler      protocol.PayloadUnmarshaler
+
+	payloadBuf []byte
+}
+
+// NewEventReader returns an EventReader built from the reader and unmarshaler
+// provided. Use the ReadEvent method to start reading from the EventStream.
+func NewEventReader(
+	reader io.ReadCloser,
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	unmarshalerForEventType func(string) (Unmarshaler, error),
+) *EventReader {
+	return &EventReader{
+		reader:                  reader,
+		decoder:                 eventstream.NewDecoder(reader),
+		payloadUnmarshaler:      payloadUnmarshaler,
+		unmarshalerForEventType: unmarshalerForEventType,
+		payloadBuf:              make([]byte, 10*1024),
+	}
+}
+
+// UseLogger instructs the EventReader to use the logger and log level
+// specified.
+func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) {
+	if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) {
+		r.decoder.UseLogger(logger)
+	}
+}
+
+// ReadEvent attempts to read a message from the EventStream and return the
+// unmarshaled event value that the message is for.
+//
+// For EventStream API errors check if the returned error satisfies the
+// awserr.Error interface to get the error's Code and Message components.
+//
+// EventUnmarshalers called with EventStream messages must take copies of the
+// message's Payload. The payload buffer is reused between events read.
+func (r *EventReader) ReadEvent() (event interface{}, err error) {
+	msg, err := r.decoder.Decode(r.payloadBuf)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Reclaim payload buffer for next message read.
+ r.payloadBuf = msg.Payload[0:0] + }() + + typ, err := GetHeaderString(msg, MessageTypeHeader) + if err != nil { + return nil, err + } + + switch typ { + case EventMessageType: + return r.unmarshalEventMessage(msg) + case ExceptionMessageType: + err = r.unmarshalEventException(msg) + return nil, err + case ErrorMessageType: + return nil, r.unmarshalErrorMessage(msg) + default: + return nil, fmt.Errorf("unknown eventstream message type, %v", typ) + } +} + +func (r *EventReader) unmarshalEventMessage( + msg eventstream.Message, +) (event interface{}, err error) { + eventType, err := GetHeaderString(msg, EventTypeHeader) + if err != nil { + return nil, err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return nil, err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return nil, err + } + + return ev, nil +} + +func (r *EventReader) unmarshalEventException( + msg eventstream.Message, +) (err error) { + eventType, err := GetHeaderString(msg, ExceptionTypeHeader) + if err != nil { + return err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return err + } + + var ok bool + err, ok = ev.(error) + if !ok { + err = messageError{ + code: "SerializationError", + msg: fmt.Sprintf( + "event stream exception %s mapped to non-error %T, %v", + eventType, ev, ev, + ), + } + } + + return err +} + +func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { + var msgErr messageError + + msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) + if err != nil { + return err + } + + msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) + if err != nil { + return err + } + + return msgErr +} + +// Close closes the EventReader's EventStream reader. +func (r *EventReader) Close() error { + return r.reader.Close() +} + +// GetHeaderString returns the value of the header as a string. If the header +// is not set or the value is not a string an error will be returned. 
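ReadEvent is the whole receive loop a generated streaming API needs. A sketch of that loop (imports of fmt, io, and this package elided; the EventReader is assumed to have been built with service-specific unmarshalers via NewEventReader):

func receive(r *eventstreamapi.EventReader) error {
	defer r.Close()

	for {
		event, err := r.ReadEvent()
		if err != nil {
			if err == io.EOF {
				return nil // stream closed normally
			}
			return err // may satisfy awserr.Error for in-stream exceptions
		}
		fmt.Printf("received %T\n", event)
	}
}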
+func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { + headerVal := msg.Headers.Get(headerName) + if headerVal == nil { + return "", fmt.Errorf("error header %s not present", headerName) + } + + v, ok := headerVal.Get().(string) + if !ok { + return "", fmt.Errorf("error header value is not a string, %T", headerVal) + } + + return v, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go new file mode 100644 index 0000000000..5ea5a988b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -0,0 +1,24 @@ +package eventstreamapi + +import "fmt" + +type messageError struct { + code string + msg string +} + +func (e messageError) Code() string { + return e.code +} + +func (e messageError) Message() string { + return e.msg +} + +func (e messageError) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.msg) +} + +func (e messageError) OrigErr() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go new file mode 100644 index 0000000000..3b44dde2f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -0,0 +1,166 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
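Set, Get, and Del give Headers map-like semantics over a slice while preserving insertion order; in particular, Set replaces an existing value rather than appending a duplicate. For illustration:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	var hs eventstream.Headers
	hs.Set("seq", eventstream.Int32Value(1))
	hs.Set("seq", eventstream.Int32Value(2)) // replaces, does not append

	fmt.Println(len(hs), hs.Get("seq")) // 1 0x00000002

	hs.Del("seq")
	fmt.Println(hs.Get("seq") == nil) // true
}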
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go new file mode 100644 index 0000000000..e3fc0766a9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -0,0 +1,501 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. +func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. 
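One subtlety in TimestampValue above: the wire format carries epoch milliseconds, so sub-millisecond precision is silently dropped on encode. A small demonstration:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	t := time.Unix(1, 999999999).UTC() // 1.999999999s after the epoch

	// String() prints epoch milliseconds; the nanosecond tail is truncated.
	fmt.Println(eventstream.TimestampValue(t)) // 1999
}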
+func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go new file mode 100644 index 0000000000..2dc012a66e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -0,0 +1,103 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := encodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 0000000000..d7d42db0a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,68 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. 
+var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 0000000000..915b0fcafd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. 
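The prefix expansion performed by Build below is plain string substitution over {label} placeholders. Mirroring just that step outside the SDK (the template and label values are illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	prefix := "{AccountId}."
	labels := map[string]string{"AccountId": "123456789012"}

	// Same replacement loop as HostPrefixBuilder.Build.
	for name, value := range labels {
		prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
	}

	fmt.Println(prefix + "service.region.amazonaws.com")
	// Output: 123456789012.service.region.amazonaws.com
}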
+func (h HostPrefixBuilder) Build(r *request.Request) {
+	if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
+		return
+	}
+
+	var labels map[string]string
+	if h.LabelsFn != nil {
+		labels = h.LabelsFn()
+	}
+
+	prefix := h.Prefix
+	for name, value := range labels {
+		prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
+	}
+
+	r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
+	if len(r.HTTPRequest.Host) > 0 {
+		r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 0000000000..776d110184
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value.
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+	NoEscape EscapeMode = iota
+	Base64Escape
+	QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+	b, err := json.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch escape {
+	case NoEscape:
+		return string(b), nil
+	case Base64Escape:
+		return base64.StdEncoding.EncodeToString(b), nil
+	case QuotedEscape:
+		return strconv.Quote(string(b)), nil
+	}
+
+	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue,
+// optionally base64 decoding the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+	var b []byte
+	var err error
+
+	switch escape {
+	case NoEscape:
+		b = []byte(v)
+	case Base64Escape:
+		b, err = base64.StdEncoding.DecodeString(v)
+	case QuotedEscape:
+		var u string
+		u, err = strconv.Unquote(v)
+		b = []byte(u)
+	default:
+		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	m := aws.JSONValue{}
+	err = json.Unmarshal(b, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 0000000000..e21614a125
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into a SDK shape.
+type PayloadUnmarshaler interface {
+	UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides the support for unmarshaling a payload reader to
+// a shape without needing a SDK request first.
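EncodeJSONValue and DecodeJSONValue above are inverses for a given EscapeMode; Base64Escape is the variant used when a JSONValue travels in an HTTP header. A round-trip sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	v := aws.JSONValue{"k": "v"}

	s, err := protocol.EncodeJSONValue(v, protocol.Base64Escape)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // eyJrIjoidiJ9

	out, err := protocol.DecodeJSONValue(s, protocol.Base64Escape)
	if err != nil {
		panic(err)
	}
	fmt.Println(out["k"]) // v
}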
+type HandlerPayloadUnmarshal struct {
+	Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if
+// unmarshaling fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+	req := &request.Request{
+		HTTPRequest: &http.Request{},
+		HTTPResponse: &http.Response{
+			StatusCode: 200,
+			Header:     http.Header{},
+			Body:       ioutil.NopCloser(r),
+		},
+		Data: v,
+	}
+
+	h.Unmarshalers.Run(req)
+
+	return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling a SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+	MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling a SDK shape into an io.Writer without
+// needing a SDK request first.
+type HandlerPayloadMarshal struct {
+	Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling
+// fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+	req := request.New(
+		aws.Config{},
+		metadata.ClientInfo{},
+		request.Handlers{},
+		nil,
+		&request.Operation{HTTPMethod: "GET"},
+		v,
+		nil,
+	)
+
+	h.Marshalers.Run(req)
+
+	if req.Error != nil {
+		return req.Error
+	}
+
+	io.Copy(w, req.GetBody())
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
index 18169f0f8c..60e5b09d54 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -25,7 +25,7 @@ func Build(r *request.Request) {
 		return
 	}
 
-	if r.ExpireTime == 0 {
+	if !r.IsPresigned() {
 		r.HTTPRequest.Method = "POST"
 		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
 		r.SetBufferBody([]byte(body.Encode()))
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index 524ca952ad..75866d0121 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -121,6 +121,10 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string
 		return nil
 	}
 
+	if _, ok := value.Interface().([]byte); ok {
+		return q.parseScalar(v, value, prefix, tag)
+	}
+
 	// check for unflattened list member
 	if !q.isEC2 && tag.Get("flattened") == "" {
 		if listName := tag.Get("locationNameList"); listName == "" {
@@ -229,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta
 		v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
 	case time.Time:
 		const ISO8601UTC = "2006-01-02T15:04:05Z"
-		v.Set(name, value.UTC().Format(ISO8601UTC))
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.ISO8601TimeFormatName
+		}
+
+		v.Set(name, protocol.FormatTime(format, value))
 	default:
 		return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index e0f4d5a541..3495c73070 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -23,7 +23,11 @@ func Unmarshal(r *request.Request) { decoder := xml.NewDecoder(r.HTTPResponse.Body) err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") if err != nil { - r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index f214296171..46d354e826 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -28,7 +28,11 @@ func UnmarshalError(r *request.Request) { bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed to read from query HTTP response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } @@ -61,6 +65,10 @@ func UnmarshalError(r *request.Request) { } // Failed to retrieve any error message from the response body - r.Error = awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr), + r.HTTPResponse.StatusCode, + r.RequestID, + ) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 7161835649..b34f5258a4 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -4,7 +4,6 @@ package rest import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "io" "net/http" @@ -18,11 +17,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) -// RFC822 returns an RFC822 formatted timestamp for AWS protocols -const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" - // Whether the byte value can be sent without escaping in AWS URLs var noEscape [256]bool @@ -252,13 +249,12 @@ func EscapePath(path string, encodeSep bool) string { return buf.String() } -func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { v = reflect.Indirect(v) if !v.IsValid() { return "", errValueNotSet } - var str string switch value := v.Interface().(type) { case string: str = value @@ -271,19 +267,28 @@ func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { case float64: str = strconv.FormatFloat(value, 'f', -1, 64) case time.Time: - str = value.UTC().Format(RFC822) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = 
protocol.FormatTime(format, value) case aws.JSONValue: - b, err := json.Marshal(value) - if err != nil { - return "", err + if len(value) == 0 { + return "", errValueNotSet } + escaping := protocol.NoEscape if tag.Get("location") == "header" { - str = base64.StdEncoding.EncodeToString(b) - } else { - str = string(b) + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) } default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) return "", err } return str, nil diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 7a779ee226..33fd53b126 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -3,7 +3,6 @@ package rest import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "io" "io/ioutil" @@ -16,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalHandler is a named request handler for unmarshaling rest protocol requests @@ -198,23 +198,21 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro } v.Set(reflect.ValueOf(&f)) case *time.Time: - t, err := time.Parse(RFC822, header) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) if err != nil { return err } v.Set(reflect.ValueOf(&t)) case aws.JSONValue: - b := []byte(header) - var err error + escaping := protocol.NoEscape if tag.Get("location") == "header" { - b, err = base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } + escaping = protocol.Base64Escape } - - m := aws.JSONValue{} - err = json.Unmarshal(b, &m) + m, err := protocol.DecodeJSONValue(header, escaping) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index 7bdf4c8538..b0f4e24566 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -36,7 +36,11 @@ func Build(r *request.Request) { var buf bytes.Buffer err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) if err != nil { - r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed to encode rest XML request", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } r.SetBufferBody(buf.Bytes()) @@ -50,7 +54,11 @@ func Unmarshal(r *request.Request) { decoder := xml.NewDecoder(r.HTTPResponse.Body) err := xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed to decode REST XML response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } } else { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go 
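The rest protocol now routes aws.JSONValue members through protocol.EncodeJSONValue and DecodeJSONValue, base64-escaping values bound for headers. A round-trip sketch with made-up map contents:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol"
)

func roundTripJSONValue() error {
	v := aws.JSONValue{"tier": "standard", "days": 30}

	// Base64Escape keeps the JSON header-safe; non-header locations
	// use NoEscape.
	encoded, err := protocol.EncodeJSONValue(v, protocol.Base64Escape)
	if err != nil {
		return err
	}
	fmt.Println("header value:", encoded)

	decoded, err := protocol.DecodeJSONValue(encoded, protocol.Base64Escape)
	if err != nil {
		return err
	}
	fmt.Println("round-tripped:", decoded)
	return nil
}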
b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 0000000000..b7ed6c6f81 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,72 @@ +package protocol + +import ( + "strconv" + "time" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" +) + +// Time formats supported by the SDK +const ( + // RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + + // RFC3339, a subset of the ISO8601 timestamp format. e.g. 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05Z" +) + +// IsKnownTimestampFormat returns whether the timestamp format name +// is known to the SDK's protocols. +func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns a string value of the time in the named format. +func FormatTime(name string, t time.Time) string { + t = t.UTC() + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822TimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601TimeFormat) + case UnixTimeFormatName: + return strconv.FormatInt(t.Unix(), 10) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. Returns +// the parsed time, or an error if parsing fails. +func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), 0), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 7091b456d1..cf981fe951 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -13,9 +13,13 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) -// BuildXML will serialize params into an xml.Encoder. -// Error will be returned if the serialization of any of the params or nested values fails. +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { b := xmlBuilder{encoder: e, namespaces: map[string]string{}} root := NewXMLElement(xml.Name{}) if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { @@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error { } for _, c := range root.Children { for _, v := range c { - return StructToXML(e, v, false) + return StructToXML(e, v, sorted) } } return nil @@ -83,15 +87,13 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle } }
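The new protocol/timestamp.go helpers above centralize the SDK's three wire formats. A quick round trip through each named format (note both FormatTime and ParseTime panic on an unknown format name):

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

	for _, name := range []string{
		protocol.RFC822TimeFormatName,  // "rfc822"
		protocol.ISO8601TimeFormatName, // "iso8601"
		protocol.UnixTimeFormatName,    // "unixTimestamp"
	} {
		s := protocol.FormatTime(name, t)
		back, err := protocol.ParseTime(name, s)
		fmt.Printf("%-13s %-29s roundtrip=%v err=%v\n", name, s, back.Equal(t), err)
	}
}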
-// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested // types are converted to XMLNodes also. func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { if !value.IsValid() { return nil } - fieldAdded := false - // unwrap payloads if payload := tag.Get("payload"); payload != "" { field, _ := value.Type().FieldByName(payload) @@ -119,6 +121,8 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl child.Attr = append(child.Attr, ns) } + var payloadFields, nonPayloadFields int + t := value.Type() for i := 0; i < value.NumField(); i++ { member := elemOf(value.Field(i)) @@ -133,8 +137,10 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl mTag := field.Tag if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ continue } + payloadFields++ if protocol.CanSetIdempotencyToken(value.Field(i), field) { token := protocol.GetIdempotencyToken() @@ -149,11 +155,11 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl if err := b.buildValue(member, child, mTag); err != nil { return err } - - fieldAdded = true } - if fieldAdded { // only append this child if we have one ore more valid members + // The only case where the child shape is not added is when the shape contains + // only non-payload fields, e.g. headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { current.AddChild(child) } @@ -278,8 +284,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl case float32: str = strconv.FormatFloat(float64(converted), 'f', -1, 32) case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - str = converted.UTC().Format(ISO8601UTC) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) default: return fmt.Errorf("unsupported value for param %s: %v (%s)", tag.Get("locationName"), value.Interface(), value.Type().Name()) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 87584628a2..ff1ef6830b 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" "time" + + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalXML deserializes an xml.Decoder into the container v.
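buildScalar now honors a timestampFormat struct tag before defaulting to ISO8601. A sketch driving that path through BuildXML; demoShape is hand-rolled for illustration, not an SDK type, and its tags only mimic the generated code's conventions:

package example

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// demoShape is a hypothetical shape; Stamp opts into the RFC822 wire format.
type demoShape struct {
	_ struct{} `type:"structure" locationName:"Demo"`

	Stamp *time.Time `locationName:"Stamp" type:"timestamp" timestampFormat:"rfc822"`
}

func buildDemo() error {
	t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

	var buf bytes.Buffer
	if err := xmlutil.BuildXML(&demoShape{Stamp: &t}, xml.NewEncoder(&buf)); err != nil {
		return err
	}
	// Expected: <Demo><Stamp>Tue, 29 Apr 2014 18:30:38 GMT</Stamp></Demo>
	fmt.Println(buf.String())
	return nil
}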
V @@ -52,9 +54,15 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { if t == "" { switch rtype.Kind() { case reflect.Struct: - t = "structure" + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } case reflect.Slice: - t = "list" + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } case reflect.Map: t = "map" } @@ -247,8 +255,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { } r.Set(reflect.ValueOf(&v)) case *time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - t, err := time.Parse(ISO8601UTC, node.Text) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 3e970b629d..515ce15215 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -29,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode { // AddChild adds child to the XMLNode. func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n if _, ok := n.Children[child.Name.Local]; !ok { n.Children[child.Name.Local] = []*XMLNode{} } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 52ac02ca96..3b3c2e380c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -3,14 +3,22 @@ package s3 import ( + "bytes" "fmt" "io" + "sync" + "sync/atomic" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" "github.com/aws/aws-sdk-go/private/protocol/restxml" ) @@ -18,19 +26,18 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // AbortMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the AbortMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See AbortMultipartUpload for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AbortMultipartUpload method directly -// instead. +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AbortMultipartUploadRequest method. // req, resp := client.AbortMultipartUploadRequest(params) @@ -40,7 +47,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { op := &request.Operation{ Name: opAbortMultipartUpload, @@ -76,7 +83,7 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // * ErrCodeNoSuchUpload "NoSuchUpload" // The specified multipart upload does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { req, out := c.AbortMultipartUploadRequest(input) return out, req.Send() @@ -102,19 +109,18 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload" // CompleteMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CompleteMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CompleteMultipartUpload for usage and error information. +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CompleteMultipartUpload method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CompleteMultipartUploadRequest method. 
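The req/Send pattern shown in these doc comments is also the entry point for presigning, which the query protocol change earlier in this patch now detects via r.IsPresigned(). A sketch with made-up identifiers:

package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// presignAbort builds the request but signs a URL instead of sending it.
func presignAbort(svc *s3.S3) (string, error) {
	req, _ := svc.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String("my-bucket"), // hypothetical values throughout
		Key:      aws.String("my-key"),
		UploadId: aws.String("upload-id"),
	})

	// No network call happens here; Presign only signs the URL.
	return req.Presign(15 * time.Minute)
}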
// req, resp := client.CompleteMultipartUploadRequest(params) @@ -124,7 +130,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { op := &request.Operation{ Name: opCompleteMultipartUpload, @@ -151,7 +157,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation CompleteMultipartUpload for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { req, out := c.CompleteMultipartUploadRequest(input) return out, req.Send() @@ -177,19 +183,18 @@ const opCopyObject = "CopyObject" // CopyObjectRequest generates a "aws/request.Request" representing the // client's request for the CopyObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See CopyObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CopyObject method directly -// instead. +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CopyObjectRequest method. // req, resp := client.CopyObjectRequest(params) @@ -199,7 +204,7 @@ const opCopyObject = "CopyObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { op := &request.Operation{ Name: opCopyObject, @@ -232,7 +237,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // The source object of the COPY operation is not in the active tier and is // only stored in Amazon Glacier. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { req, out := c.CopyObjectRequest(input) return out, req.Send() @@ -258,19 +263,18 @@ const opCreateBucket = "CreateBucket" // CreateBucketRequest generates a "aws/request.Request" representing the // client's request for the CreateBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateBucket for usage and error information. +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateBucket method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateBucketRequest method. // req, resp := client.CreateBucketRequest(params) @@ -280,7 +284,7 @@ const opCreateBucket = "CreateBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { op := &request.Operation{ Name: opCreateBucket, @@ -315,7 +319,7 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // // * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { req, out := c.CreateBucketRequest(input) return out, req.Send() @@ -341,19 +345,18 @@ const opCreateMultipartUpload = "CreateMultipartUpload" // CreateMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CreateMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See CreateMultipartUpload for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
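One CopyObject detail the generated docs leave implicit: CopySource is a single URL-encoded "source-bucket/source-key" value, not separate bucket and key fields. A sketch with hypothetical names:

package example

import (
	"net/url"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func copyAcross(svc *s3.S3) error {
	// CopySource must be URL-encoded; the slash may be escaped as %2F.
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("dst-bucket"), // hypothetical names throughout
		Key:        aws.String("dst-key"),
		CopySource: aws.String(url.PathEscape("src-bucket/src-key")),
	})
	return err
}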
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateMultipartUpload method directly -// instead. +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateMultipartUploadRequest method. // req, resp := client.CreateMultipartUploadRequest(params) @@ -363,7 +366,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { op := &request.Operation{ Name: opCreateMultipartUpload, @@ -396,7 +399,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation CreateMultipartUpload for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { req, out := c.CreateMultipartUploadRequest(input) return out, req.Send() @@ -422,19 +425,18 @@ const opDeleteBucket = "DeleteBucket" // DeleteBucketRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucket for usage and error information. +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucket method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketRequest method. 
// req, resp := client.DeleteBucketRequest(params) @@ -444,7 +446,7 @@ const opDeleteBucket = "DeleteBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { op := &request.Operation{ Name: opDeleteBucket, @@ -458,8 +460,7 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request output = &DeleteBucketOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -474,7 +475,7 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucket for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { req, out := c.DeleteBucketRequest(input) return out, req.Send() @@ -500,19 +501,18 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration // DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See DeleteBucketAnalyticsConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly -// instead. +// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
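These constructors now use Handlers.Unmarshal.Swap instead of Remove plus PushBackNamed, replacing the named handler in place so its position in the list is preserved. The same call is available to callers; a sketch:

package example

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/restxml"
)

// discardBody swaps the restxml unmarshaler for the discard-body handler,
// keeping the handler's slot in the unmarshal list.
func discardBody(req *request.Request) {
	req.Handlers.Unmarshal.Swap(
		restxml.UnmarshalHandler.Name,
		protocol.UnmarshalDiscardBodyHandler,
	)
}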
// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) @@ -522,7 +522,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketAnalyticsConfiguration, @@ -536,8 +536,7 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt output = &DeleteBucketAnalyticsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -552,7 +551,7 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -578,19 +577,18 @@ const opDeleteBucketCors = "DeleteBucketCors" // DeleteBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketCors for usage and error information. +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketCors method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketCorsRequest method. 
// req, resp := client.DeleteBucketCorsRequest(params) @@ -600,7 +598,7 @@ const opDeleteBucketCors = "DeleteBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { op := &request.Operation{ Name: opDeleteBucketCors, @@ -614,14 +612,13 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request output = &DeleteBucketCorsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // DeleteBucketCors API operation for Amazon Simple Storage Service. // -// Deletes the cors configuration information set for the bucket. +// Deletes the CORS configuration information set for the bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -629,7 +626,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { req, out := c.DeleteBucketCorsRequest(input) return out, req.Send() @@ -651,23 +648,97 @@ func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCor return out, req.Send() } +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. 
+// req, resp := client.DeleteBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { + op := &request.Operation{ + Name: opDeleteBucketEncryption, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &DeleteBucketEncryptionInput{} + } + + output = &DeleteBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// Deletes the server-side encryption configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() +} + +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketInventoryConfiguration for usage and error information. +// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
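Every operation gains a WithContext variant like DeleteBucketEncryptionWithContext above; aws.Context is satisfied by the standard library's context.Context, so deadlines and cancellation compose as usual. A sketch with a hypothetical bucket name:

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func deleteEncryptionWithDeadline(svc *s3.S3) error {
	// The SDK aborts the in-flight request once the deadline passes.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := svc.DeleteBucketEncryptionWithContext(ctx, &s3.DeleteBucketEncryptionInput{
		Bucket: aws.String("my-bucket"), // hypothetical bucket
	})
	return err
}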
If -// you just want the service response, call the DeleteBucketInventoryConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. // req, resp := client.DeleteBucketInventoryConfigurationRequest(params) @@ -677,7 +748,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketInventoryConfiguration, @@ -691,8 +762,7 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent output = &DeleteBucketInventoryConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -707,7 +777,7 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { req, out := c.DeleteBucketInventoryConfigurationRequest(input) return out, req.Send() @@ -733,19 +803,18 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle" // DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See DeleteBucketLifecycle for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketLifecycle method directly -// instead. +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketLifecycleRequest method. // req, resp := client.DeleteBucketLifecycleRequest(params) @@ -755,7 +824,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { op := &request.Operation{ Name: opDeleteBucketLifecycle, @@ -769,8 +838,7 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re output = &DeleteBucketLifecycleOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -784,7 +852,7 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { req, out := c.DeleteBucketLifecycleRequest(input) return out, req.Send() @@ -810,19 +878,18 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" // DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketMetricsConfiguration for usage and error information. +// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketMetricsConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. 
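The "inject custom logic" note in these comments refers to the handler lists on the returned request. One concrete pattern, a header added during the Build phase before signing, sketched with a hypothetical header name:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/s3"
)

func deleteLifecycleWithAudit(svc *s3.S3) error {
	req, _ := svc.DeleteBucketLifecycleRequest(&s3.DeleteBucketLifecycleInput{
		Bucket: aws.String("my-bucket"), // hypothetical bucket
	})

	// Runs during the Build phase, before signing, so the header is part
	// of the request that gets signed and sent.
	req.Handlers.Build.PushBack(func(r *request.Request) {
		r.HTTPRequest.Header.Set("X-Audit-Reason", "scheduled-cleanup") // hypothetical header
	})

	return req.Send()
}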
// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) @@ -832,7 +899,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketMetricsConfiguration, @@ -846,8 +913,7 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC output = &DeleteBucketMetricsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -862,7 +928,7 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { req, out := c.DeleteBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -888,19 +954,18 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy" // DeleteBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See DeleteBucketPolicy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketPolicy method directly -// instead. +// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketPolicyRequest method. 
// req, resp := client.DeleteBucketPolicyRequest(params) @@ -910,7 +975,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { op := &request.Operation{ Name: opDeleteBucketPolicy, @@ -924,8 +989,7 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req output = &DeleteBucketPolicyOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -939,7 +1003,7 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { req, out := c.DeleteBucketPolicyRequest(input) return out, req.Send() @@ -965,19 +1029,18 @@ const opDeleteBucketReplication = "DeleteBucketReplication" // DeleteBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketReplication for usage and error information. +// See DeleteBucketReplication for more information on using the DeleteBucketReplication +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketReplication method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketReplicationRequest method. 
// req, resp := client.DeleteBucketReplicationRequest(params) @@ -987,7 +1050,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { op := &request.Operation{ Name: opDeleteBucketReplication, @@ -1001,14 +1064,15 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) output = &DeleteBucketReplicationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // DeleteBucketReplication API operation for Amazon Simple Storage Service. // -// Deletes the replication configuration from the bucket. +// Deletes the replication configuration from the bucket. For information about +// replication configuration, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// in the Amazon S3 Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1016,7 +1080,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketReplication for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { req, out := c.DeleteBucketReplicationRequest(input) return out, req.Send() @@ -1042,19 +1106,18 @@ const opDeleteBucketTagging = "DeleteBucketTagging" // DeleteBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See DeleteBucketTagging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketTagging method directly -// instead. +// See DeleteBucketTagging for more information on using the DeleteBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketTaggingRequest method. // req, resp := client.DeleteBucketTaggingRequest(params) @@ -1064,7 +1127,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { op := &request.Operation{ Name: opDeleteBucketTagging, @@ -1078,8 +1141,7 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r output = &DeleteBucketTaggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -1093,7 +1155,7 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { req, out := c.DeleteBucketTaggingRequest(input) return out, req.Send() @@ -1119,19 +1181,18 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite" // DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketWebsite for usage and error information. +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketWebsite method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketWebsiteRequest method. 
// req, resp := client.DeleteBucketWebsiteRequest(params) @@ -1141,7 +1202,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { op := &request.Operation{ Name: opDeleteBucketWebsite, @@ -1155,8 +1216,7 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r output = &DeleteBucketWebsiteOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -1170,7 +1230,7 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { req, out := c.DeleteBucketWebsiteRequest(input) return out, req.Send() @@ -1196,19 +1256,18 @@ const opDeleteObject = "DeleteObject" // DeleteObjectRequest generates a "aws/request.Request" representing the // client's request for the DeleteObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See DeleteObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObject method directly -// instead. +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteObjectRequest method. 
// req, resp := client.DeleteObjectRequest(params) @@ -1218,7 +1277,7 @@ const opDeleteObject = "DeleteObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { op := &request.Operation{ Name: opDeleteObject, @@ -1247,7 +1306,7 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { req, out := c.DeleteObjectRequest(input) return out, req.Send() @@ -1273,19 +1332,18 @@ const opDeleteObjectTagging = "DeleteObjectTagging" // DeleteObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the DeleteObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteObjectTagging for usage and error information. +// See DeleteObjectTagging for more information on using the DeleteObjectTagging +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObjectTagging method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteObjectTaggingRequest method. // req, resp := client.DeleteObjectTaggingRequest(params) @@ -1295,7 +1353,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { op := &request.Operation{ Name: opDeleteObjectTagging, @@ -1322,7 +1380,7 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObjectTagging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { req, out := c.DeleteObjectTaggingRequest(input) return out, req.Send() @@ -1348,19 +1406,18 @@ const opDeleteObjects = "DeleteObjects" // DeleteObjectsRequest generates a "aws/request.Request" representing the // client's request for the DeleteObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See DeleteObjects for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObjects method directly -// instead. +// See DeleteObjects for more information on using the DeleteObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteObjectsRequest method. // req, resp := client.DeleteObjectsRequest(params) @@ -1370,7 +1427,7 @@ const opDeleteObjects = "DeleteObjects" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { op := &request.Operation{ Name: opDeleteObjects, @@ -1398,7 +1455,7 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObjects for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { req, out := c.DeleteObjectsRequest(input) return out, req.Send() @@ -1420,23 +1477,97 @@ func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput return out, req.Send() } +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" + +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePublicAccessBlockRequest method. +// req, resp := client.DeletePublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { + op := &request.Operation{ + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &DeletePublicAccessBlockInput{} + } + + output = &DeletePublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Removes the PublicAccessBlock configuration from an Amazon S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeletePublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + return out, req.Send() +} + +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" // GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketAccelerateConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
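//
// A hedged sketch of calling the new DeletePublicAccessBlock operation defined
// above with a cancellable context. The client, timeout, and bucket name are
// illustrative assumptions:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//    defer cancel()
//
//    out, err := client.DeletePublicAccessBlockWithContext(ctx, &s3.DeletePublicAccessBlockInput{
//        Bucket: aws.String("example-bucket"), // hypothetical bucket name
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(out)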
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAccelerateConfiguration method directly -// instead. +// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketAccelerateConfigurationRequest method. // req, resp := client.GetBucketAccelerateConfigurationRequest(params) @@ -1446,7 +1577,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opGetBucketAccelerateConfiguration, @@ -1473,7 +1604,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAccelerateConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { req, out := c.GetBucketAccelerateConfigurationRequest(input) return out, req.Send() @@ -1499,19 +1630,18 @@ const opGetBucketAcl = "GetBucketAcl" // GetBucketAclRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketAcl for usage and error information. +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAcl method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
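//
// A hedged sketch of the per-request customization mentioned above: the
// WithContext variants accept request.Option values, which are plain functions
// over *request.Request, so a closure can add a header or other tweak for a
// single call. The names here are illustrative assumptions:
//
//    withDebugHeader := func(r *request.Request) {
//        r.HTTPRequest.Header.Set("X-Example-Debug", "true")
//    }
//    aclOut, err := client.GetBucketAclWithContext(ctx, &s3.GetBucketAclInput{
//        Bucket: aws.String("example-bucket"),
//    }, withDebugHeader)
//    if err == nil {
//        fmt.Println(aclOut.Grants)
//    }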
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketAclRequest method. // req, resp := client.GetBucketAclRequest(params) @@ -1521,7 +1651,7 @@ const opGetBucketAcl = "GetBucketAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { op := &request.Operation{ Name: opGetBucketAcl, @@ -1548,7 +1678,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAcl for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { req, out := c.GetBucketAclRequest(input) return out, req.Send() @@ -1574,19 +1704,18 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketAnalyticsConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAnalyticsConfiguration method directly -// instead. +// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) @@ -1596,7 +1725,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opGetBucketAnalyticsConfiguration, @@ -1624,7 +1753,7 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { req, out := c.GetBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -1650,19 +1779,18 @@ const opGetBucketCors = "GetBucketCors" // GetBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the GetBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketCors for usage and error information. +// See GetBucketCors for more information on using the GetBucketCors +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketCors method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketCorsRequest method. // req, resp := client.GetBucketCorsRequest(params) @@ -1672,7 +1800,7 @@ const opGetBucketCors = "GetBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { op := &request.Operation{ Name: opGetBucketCors, @@ -1691,7 +1819,7 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // GetBucketCors API operation for Amazon Simple Storage Service. // -// Returns the cors configuration for the bucket. +// Returns the CORS configuration for the bucket. 
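//
// A brief, hedged sketch of reading the CORS configuration just described;
// the client and bucket name are illustrative assumptions:
//
//    corsOut, err := client.GetBucketCors(&s3.GetBucketCorsInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err == nil {
//        for _, rule := range corsOut.CORSRules {
//            fmt.Println(aws.StringValueSlice(rule.AllowedMethods))
//        }
//    }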
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1699,7 +1827,7 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { req, out := c.GetBucketCorsRequest(input) return out, req.Send() @@ -1721,23 +1849,96 @@ func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput return out, req.Send() } +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketEncryptionRequest method. +// req, resp := client.GetBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { + op := &request.Operation{ + Name: opGetBucketEncryption, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &GetBucketEncryptionInput{} + } + + output = &GetBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// Returns the server-side encryption configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() +} + +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketInventoryConfiguration for usage and error information. +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketInventoryConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketInventoryConfigurationRequest method. // req, resp := client.GetBucketInventoryConfigurationRequest(params) @@ -1747,7 +1948,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opGetBucketInventoryConfiguration, @@ -1775,7 +1976,7 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { req, out := c.GetBucketInventoryConfigurationRequest(input) return out, req.Send() @@ -1801,19 +2002,18 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // GetBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycle operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketLifecycle for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycle method directly -// instead. +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLifecycleRequest method. // req, resp := client.GetBucketLifecycleRequest(params) @@ -1823,7 +2023,9 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") @@ -1853,7 +2055,9 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { req, out := c.GetBucketLifecycleRequest(input) return out, req.Send() @@ -1868,6 +2072,8 @@ func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifec // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. +// +// Deprecated: GetBucketLifecycleWithContext has been deprecated func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { req, out := c.GetBucketLifecycleRequest(input) req.SetContext(ctx) @@ -1879,19 +2085,18 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketLifecycleConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycleConfiguration method directly -// instead. +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLifecycleConfigurationRequest method. // req, resp := client.GetBucketLifecycleConfigurationRequest(params) @@ -1901,7 +2106,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opGetBucketLifecycleConfiguration, @@ -1928,7 +2133,7 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycleConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { req, out := c.GetBucketLifecycleConfigurationRequest(input) return out, req.Send() @@ -1954,19 +2159,18 @@ const opGetBucketLocation = "GetBucketLocation" // GetBucketLocationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLocation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketLocation for usage and error information. +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. 
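//
// A hedged sketch of resolving a bucket's region with GetBucketLocation; the
// client and bucket name are illustrative assumptions. An empty
// LocationConstraint denotes the us-east-1 region:
//
//    locOut, err := client.GetBucketLocation(&s3.GetBucketLocationInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err == nil {
//        region := aws.StringValue(locOut.LocationConstraint)
//        if region == "" {
//            region = "us-east-1" // empty means US East (N. Virginia)
//        }
//        fmt.Println(region)
//    }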
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLocation method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLocationRequest method. // req, resp := client.GetBucketLocationRequest(params) @@ -1976,7 +2180,7 @@ const opGetBucketLocation = "GetBucketLocation" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { op := &request.Operation{ Name: opGetBucketLocation, @@ -2003,7 +2207,7 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLocation for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { req, out := c.GetBucketLocationRequest(input) return out, req.Send() @@ -2029,19 +2233,18 @@ const opGetBucketLogging = "GetBucketLogging" // GetBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLogging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketLogging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLogging method directly -// instead. +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLoggingRequest method. 
// req, resp := client.GetBucketLoggingRequest(params) @@ -2051,7 +2254,7 @@ const opGetBucketLogging = "GetBucketLogging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { op := &request.Operation{ Name: opGetBucketLogging, @@ -2079,7 +2282,7 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLogging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { req, out := c.GetBucketLoggingRequest(input) return out, req.Send() @@ -2105,19 +2308,18 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketMetricsConfiguration for usage and error information. +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketMetricsConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketMetricsConfigurationRequest method. // req, resp := client.GetBucketMetricsConfigurationRequest(params) @@ -2127,7 +2329,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opGetBucketMetricsConfiguration, @@ -2155,7 +2357,7 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketMetricsConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { req, out := c.GetBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -2181,19 +2383,18 @@ const opGetBucketNotification = "GetBucketNotification" // GetBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotification operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketNotification for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotification method directly -// instead. +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketNotificationRequest method. // req, resp := client.GetBucketNotificationRequest(params) @@ -2203,7 +2404,9 @@ const opGetBucketNotification = "GetBucketNotification" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") @@ -2233,7 +2436,9 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotification for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { req, out := c.GetBucketNotificationRequest(input) return out, req.Send() @@ -2248,6 +2453,8 @@ func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequ // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. +// +// Deprecated: GetBucketNotificationWithContext has been deprecated func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { req, out := c.GetBucketNotificationRequest(input) req.SetContext(ctx) @@ -2259,19 +2466,18 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketNotificationConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotificationConfiguration method directly -// instead. +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketNotificationConfigurationRequest method. // req, resp := client.GetBucketNotificationConfigurationRequest(params) @@ -2281,7 +2487,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { op := &request.Operation{ Name: opGetBucketNotificationConfiguration, @@ -2308,7 +2514,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotificationConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { req, out := c.GetBucketNotificationConfigurationRequest(input) return out, req.Send() @@ -2334,19 +2540,18 @@ const opGetBucketPolicy = "GetBucketPolicy" // GetBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketPolicy for usage and error information. +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketPolicy method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketPolicyRequest method. // req, resp := client.GetBucketPolicyRequest(params) @@ -2356,7 +2561,7 @@ const opGetBucketPolicy = "GetBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { op := &request.Operation{ Name: opGetBucketPolicy, @@ -2383,7 +2588,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { req, out := c.GetBucketPolicyRequest(input) return out, req.Send() @@ -2405,23 +2610,97 @@ func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyI return out, req.Send() } +const opGetBucketPolicyStatus = "GetBucketPolicyStatus" + +// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicyStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyStatusRequest method. +// req, resp := client.GetBucketPolicyStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { + op := &request.Operation{ + Name: opGetBucketPolicyStatus, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policyStatus", + } + + if input == nil { + input = &GetBucketPolicyStatusInput{} + } + + output = &GetBucketPolicyStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicyStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + return out, req.Send() +} + +// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicyStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBucketReplication = "GetBucketReplication" // GetBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketReplication for usage and error information. +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketReplication method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketReplicationRequest method. // req, resp := client.GetBucketReplicationRequest(params) @@ -2431,7 +2710,7 @@ const opGetBucketReplication = "GetBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { op := &request.Operation{ Name: opGetBucketReplication, @@ -2452,13 +2731,17 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // // Returns the replication configuration of a bucket. // +// It can take a while to propagate the put or delete a replication configuration +// to all Amazon S3 systems. Therefore, a get request soon after put or delete +// can return a wrong result. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketReplication for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { req, out := c.GetBucketReplicationRequest(input) return out, req.Send() @@ -2484,19 +2767,18 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the GetBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketRequestPayment for usage and error information. +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketRequestPayment method directly -// instead. 
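Given the propagation caveat in the GetBucketReplication doc comment above, a read of the replication configuration should be treated as possibly stale shortly after a put or delete; a minimal sketch, with a hypothetical bucket name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Note the eventual consistency described above: a get issued
	// immediately after a put or delete may return a stale result.
	out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	})
	if err != nil {
		log.Fatal(err)
	}
	cfg := out.ReplicationConfiguration
	fmt.Println("role:", aws.StringValue(cfg.Role), "rules:", len(cfg.Rules))
}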
+// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketRequestPaymentRequest method. // req, resp := client.GetBucketRequestPaymentRequest(params) @@ -2506,7 +2788,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { op := &request.Operation{ Name: opGetBucketRequestPayment, @@ -2533,7 +2815,7 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketRequestPayment for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { req, out := c.GetBucketRequestPaymentRequest(input) return out, req.Send() @@ -2559,19 +2841,18 @@ const opGetBucketTagging = "GetBucketTagging" // GetBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketTagging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketTagging method directly -// instead. +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketTaggingRequest method. 
// req, resp := client.GetBucketTaggingRequest(params) @@ -2581,7 +2862,7 @@ const opGetBucketTagging = "GetBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { op := &request.Operation{ Name: opGetBucketTagging, @@ -2608,7 +2889,7 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { req, out := c.GetBucketTaggingRequest(input) return out, req.Send() @@ -2634,19 +2915,18 @@ const opGetBucketVersioning = "GetBucketVersioning" // GetBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the GetBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketVersioning for usage and error information. +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketVersioning method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketVersioningRequest method. // req, resp := client.GetBucketVersioningRequest(params) @@ -2656,7 +2936,7 @@ const opGetBucketVersioning = "GetBucketVersioning" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { op := &request.Operation{ Name: opGetBucketVersioning, @@ -2683,7 +2963,7 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketVersioning for usage and error information. 
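A minimal sketch of the plain (non-Request) wrapper style using GetBucketVersioning, assuming a default session and a hypothetical bucket name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	})
	if err != nil {
		log.Fatal(err)
	}
	// Status is empty for buckets that have never had versioning enabled.
	fmt.Println("versioning:", aws.StringValue(out.Status))
}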
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { req, out := c.GetBucketVersioningRequest(input) return out, req.Send() @@ -2709,19 +2989,18 @@ const opGetBucketWebsite = "GetBucketWebsite" // GetBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the GetBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketWebsite for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketWebsite method directly -// instead. +// See GetBucketWebsite for more information on using the GetBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketWebsiteRequest method. // req, resp := client.GetBucketWebsiteRequest(params) @@ -2731,7 +3010,7 @@ const opGetBucketWebsite = "GetBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { op := &request.Operation{ Name: opGetBucketWebsite, @@ -2758,7 +3037,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { req, out := c.GetBucketWebsiteRequest(input) return out, req.Send() @@ -2784,19 +3063,18 @@ const opGetObject = "GetObject" // GetObjectRequest generates a "aws/request.Request" representing the // client's request for the GetObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetObject for usage and error information. 
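For GetObject the output carries a streaming body that the caller must close; a minimal sketch under the same hypothetical session and names:

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		Key:    aws.String("example-key"),    // hypothetical object key
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close() // the body must be closed to release the connection

	n, err := io.Copy(os.Stdout, out.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Fprintln(os.Stderr, "read", n, "bytes")
}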
+// See GetObject for more information on using the GetObject +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObject method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectRequest method. // req, resp := client.GetObjectRequest(params) @@ -2806,7 +3084,7 @@ const opGetObject = "GetObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { op := &request.Operation{ Name: opGetObject, @@ -2838,7 +3116,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) return out, req.Send() @@ -2864,19 +3142,18 @@ const opGetObjectAcl = "GetObjectAcl" // GetObjectAclRequest generates a "aws/request.Request" representing the // client's request for the GetObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObjectAcl for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectAcl method directly -// instead. +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectAclRequest method. 
// req, resp := client.GetObjectAclRequest(params) @@ -2886,7 +3163,7 @@ const opGetObjectAcl = "GetObjectAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { op := &request.Operation{ Name: opGetObjectAcl, @@ -2918,7 +3195,7 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { req, out := c.GetObjectAclRequest(input) return out, req.Send() @@ -2940,120 +3217,342 @@ func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, return out, req.Send() } -const opGetObjectTagging = "GetObjectTagging" +const opGetObjectLegalHold = "GetObjectLegalHold" -// GetObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObjectTagging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTagging method directly -// instead. +// See GetObjectLegalHold for more information on using the GetObjectLegalHold +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the GetObjectTaggingRequest method. -// req, resp := client.GetObjectTaggingRequest(params) +// +// // Example sending a request using the GetObjectLegalHoldRequest method. 
+// req, resp := client.GetObjectLegalHoldRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging -func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { op := &request.Operation{ - Name: opGetObjectTagging, + Name: opGetObjectLegalHold, HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?tagging", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", } if input == nil { - input = &GetObjectTaggingInput{} + input = &GetObjectLegalHoldInput{} } - output = &GetObjectTaggingOutput{} + output = &GetObjectLegalHoldOutput{} req = c.newRequest(op, input, output) return } -// GetObjectTagging API operation for Amazon Simple Storage Service. +// GetObjectLegalHold API operation for Amazon Simple Storage Service. // -// Returns the tag-set of an object. +// Gets an object's current Legal Hold status. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging -func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { - req, out := c.GetObjectTaggingRequest(input) +// API operation GetObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) return out, req.Send() } -// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of // the ability to pass a context and additional request options. // -// See GetObjectTagging for details on how to use this API operation. +// See GetObjectLegalHold for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { - req, out := c.GetObjectTaggingRequest(input) +func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetObjectTorrent = "GetObjectTorrent" +const opGetObjectLockConfiguration = "GetObjectLockConfiguration" -// GetObjectTorrentRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTorrent operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObjectTorrent for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTorrent method directly -// instead. +// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the GetObjectTorrentRequest method. -// req, resp := client.GetObjectTorrentRequest(params) +// +// // Example sending a request using the GetObjectLockConfigurationRequest method. +// req, resp := client.GetObjectLockConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent -func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { op := &request.Operation{ - Name: opGetObjectTorrent, + Name: opGetObjectLockConfiguration, HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?torrent", + HTTPPath: "/{Bucket}?object-lock", } if input == nil { - input = &GetObjectTorrentInput{} + input = &GetObjectLockConfigurationInput{} } - output = &GetObjectTorrentOutput{} + output = &GetObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLockConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectRetention = "GetObjectRetention" + +// GetObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectRetention for more information on using the GetObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectRetentionRequest method. +// req, resp := client.GetObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { + op := &request.Operation{ + Name: opGetObjectRetention, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &GetObjectRetentionInput{} + } + + output = &GetObjectRetentionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectRetention API operation for Amazon Simple Storage Service. +// +// Retrieves an object's retention settings. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + return out, req.Send() +} + +// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of +// the ability to pass a context and additional request options. 
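Likewise for the new GetObjectRetention operation, a minimal sketch with hypothetical bucket and key names:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObjectRetention(&s3.GetObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		Key:    aws.String("example-key"),    // hypothetical object key
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mode:", aws.StringValue(out.Retention.Mode))
	fmt.Println("retain until:", aws.TimeValue(out.Retention.RetainUntilDate))
}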
+// +// See GetObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTagging = "GetObjectTagging" + +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTagging for more information on using the GetObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectTaggingRequest method. +// req, resp := client.GetObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { + op := &request.Operation{ + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &GetObjectTaggingInput{} + } + + output = &GetObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag-set of an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + return out, req.Send() +} + +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
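A minimal sketch of GetObjectTagging, iterating the returned tag set; bucket and key are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		Key:    aws.String("example-key"),    // hypothetical object key
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
}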
+func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + output = &GetObjectTorrentOutput{} req = c.newRequest(op, input, output) return } @@ -3068,7 +3567,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetObjectTorrent for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { req, out := c.GetObjectTorrentRequest(input) return out, req.Send() @@ -3090,23 +3589,96 @@ func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorren return out, req.Send() } +const opGetPublicAccessBlock = "GetPublicAccessBlock" + +// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPublicAccessBlockRequest method. 
+// req, resp := client.GetPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opGetPublicAccessBlock, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &GetPublicAccessBlockInput{} + } + + output = &GetPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + return out, req.Send() +} + +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opHeadBucket = "HeadBucket" // HeadBucketRequest generates a "aws/request.Request" representing the // client's request for the HeadBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See HeadBucket for usage and error information. +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadBucket method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
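A minimal sketch of reading the new PublicAccessBlock configuration for a hypothetical bucket:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	})
	if err != nil {
		log.Fatal(err)
	}
	cfg := out.PublicAccessBlockConfiguration
	fmt.Println("BlockPublicAcls:      ", aws.BoolValue(cfg.BlockPublicAcls))
	fmt.Println("IgnorePublicAcls:     ", aws.BoolValue(cfg.IgnorePublicAcls))
	fmt.Println("BlockPublicPolicy:    ", aws.BoolValue(cfg.BlockPublicPolicy))
	fmt.Println("RestrictPublicBuckets:", aws.BoolValue(cfg.RestrictPublicBuckets))
}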
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the HeadBucketRequest method. // req, resp := client.HeadBucketRequest(params) @@ -3116,7 +3688,7 @@ const opHeadBucket = "HeadBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { op := &request.Operation{ Name: opHeadBucket, @@ -3130,8 +3702,7 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou output = &HeadBucketOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -3151,7 +3722,7 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { req, out := c.HeadBucketRequest(input) return out, req.Send() @@ -3177,19 +3748,18 @@ const opHeadObject = "HeadObject" // HeadObjectRequest generates a "aws/request.Request" representing the // client's request for the HeadObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See HeadObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadObject method directly -// instead. +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the HeadObjectRequest method. 
// req, resp := client.HeadObjectRequest(params) @@ -3199,7 +3769,7 @@ const opHeadObject = "HeadObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { op := &request.Operation{ Name: opHeadObject, @@ -3231,7 +3801,7 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation HeadObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { req, out := c.HeadObjectRequest(input) return out, req.Send() @@ -3257,19 +3827,18 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" // ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketAnalyticsConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ListBucketAnalyticsConfigurations for usage and error information. +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketAnalyticsConfigurations method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. // req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) @@ -3279,7 +3848,7 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { op := &request.Operation{ Name: opListBucketAnalyticsConfigurations, @@ -3306,7 +3875,7 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketAnalyticsConfigurations for usage and error information. 
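HEAD responses carry no error body for the SDK to decode a typed code from, so distinguishing a missing key by HTTP status is one pragmatic approach; treating 404 as not-found is an assumption in this sketch, not a contract the SDK documents:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		Key:    aws.String("example-key"),    // hypothetical object key
	})
	if err != nil {
		// Assumption: a missing key surfaces as an HTTP 404, since the
		// HEAD response has no XML error body to extract a code from.
		if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 404 {
			fmt.Println("object not found")
			return
		}
		log.Fatal(err)
	}
	fmt.Println("size:", aws.Int64Value(out.ContentLength))
}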
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { req, out := c.ListBucketAnalyticsConfigurationsRequest(input) return out, req.Send() @@ -3332,19 +3901,18 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketInventoryConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListBucketInventoryConfigurations for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketInventoryConfigurations method directly -// instead. +// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketInventoryConfigurationsRequest method. // req, resp := client.ListBucketInventoryConfigurationsRequest(params) @@ -3354,7 +3922,7 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { op := &request.Operation{ Name: opListBucketInventoryConfigurations, @@ -3381,7 +3949,7 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketInventoryConfigurations for usage and error information. 
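The List*Configurations operations have no generated *Pages helper, so continuation tokens are handled by hand; a minimal sketch for inventory configurations, with a hypothetical bucket name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	input := &s3.ListBucketInventoryConfigurationsInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	}
	for {
		out, err := svc.ListBucketInventoryConfigurations(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range out.InventoryConfigurationList {
			fmt.Println(aws.StringValue(c.Id))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		// Feed the token back in to fetch the next page.
		input.ContinuationToken = out.NextContinuationToken
	}
}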
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { req, out := c.ListBucketInventoryConfigurationsRequest(input) return out, req.Send() @@ -3407,19 +3975,18 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketMetricsConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ListBucketMetricsConfigurations for usage and error information. +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketMetricsConfigurations method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketMetricsConfigurationsRequest method. // req, resp := client.ListBucketMetricsConfigurationsRequest(params) @@ -3429,7 +3996,7 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { op := &request.Operation{ Name: opListBucketMetricsConfigurations, @@ -3456,7 +4023,7 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketMetricsConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { req, out := c.ListBucketMetricsConfigurationsRequest(input) return out, req.Send() @@ -3482,19 +4049,18 @@ const opListBuckets = "ListBuckets" // ListBucketsRequest generates a "aws/request.Request" representing the // client's request for the ListBuckets operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListBuckets for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBuckets method directly -// instead. +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketsRequest method. // req, resp := client.ListBucketsRequest(params) @@ -3504,7 +4070,7 @@ const opListBuckets = "ListBuckets" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { op := &request.Operation{ Name: opListBuckets, @@ -3531,7 +4097,7 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBuckets for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { req, out := c.ListBucketsRequest(input) return out, req.Send() @@ -3557,19 +4123,18 @@ const opListMultipartUploads = "ListMultipartUploads" // ListMultipartUploadsRequest generates a "aws/request.Request" representing the // client's request for the ListMultipartUploads operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ListMultipartUploads for usage and error information. +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListMultipartUploads method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListMultipartUploadsRequest method. // req, resp := client.ListMultipartUploadsRequest(params) @@ -3579,7 +4144,7 @@ const opListMultipartUploads = "ListMultipartUploads" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { op := &request.Operation{ Name: opListMultipartUploads, @@ -3612,7 +4177,7 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListMultipartUploads for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { req, out := c.ListMultipartUploadsRequest(input) return out, req.Send() @@ -3688,19 +4253,18 @@ const opListObjectVersions = "ListObjectVersions" // ListObjectVersionsRequest generates a "aws/request.Request" representing the // client's request for the ListObjectVersions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListObjectVersions for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectVersions method directly -// instead. +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectVersionsRequest method. 
// req, resp := client.ListObjectVersionsRequest(params) @@ -3710,7 +4274,7 @@ const opListObjectVersions = "ListObjectVersions" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { op := &request.Operation{ Name: opListObjectVersions, @@ -3743,7 +4307,7 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListObjectVersions for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { req, out := c.ListObjectVersionsRequest(input) return out, req.Send() @@ -3819,19 +4383,18 @@ const opListObjects = "ListObjects" // ListObjectsRequest generates a "aws/request.Request" representing the // client's request for the ListObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ListObjects for usage and error information. +// See ListObjects for more information on using the ListObjects +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjects method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectsRequest method. // req, resp := client.ListObjectsRequest(params) @@ -3841,7 +4404,7 @@ const opListObjects = "ListObjects" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { op := &request.Operation{ Name: opListObjects, @@ -3881,7 +4444,7 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { req, out := c.ListObjectsRequest(input) return out, req.Send() @@ -3957,19 +4520,18 @@ const opListObjectsV2 = "ListObjectsV2" // ListObjectsV2Request generates a "aws/request.Request" representing the // client's request for the ListObjectsV2 operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListObjectsV2 for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectsV2 method directly -// instead. +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectsV2Request method. // req, resp := client.ListObjectsV2Request(params) @@ -3979,7 +4541,7 @@ const opListObjectsV2 = "ListObjectsV2" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { op := &request.Operation{ Name: opListObjectsV2, @@ -4020,7 +4582,7 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { req, out := c.ListObjectsV2Request(input) return out, req.Send() @@ -4096,19 +4658,18 @@ const opListParts = "ListParts" // ListPartsRequest generates a "aws/request.Request" representing the // client's request for the ListParts operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ListParts for usage and error information. +// See ListParts for more information on using the ListParts +// API call, and error handling. 
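The doc comments above repeatedly defer to awserr.Error for error handling; a short sketch of the runtime type assertion they recommend, keyed on the ErrCodeNoSuchBucket constant shown in this diff (the bucket name is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	client := s3.New(sess)

	_, err := client.ListObjects(&s3.ListObjectsInput{
		Bucket: aws.String("example-missing-bucket"), // placeholder
	})
	if err != nil {
		// Runtime type assertion, as the generated docs suggest.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case s3.ErrCodeNoSuchBucket:
				fmt.Println("bucket does not exist:", aerr.Message())
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
			return
		}
		log.Fatal(err)
	}
}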
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListParts method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListPartsRequest method. // req, resp := client.ListPartsRequest(params) @@ -4118,7 +4679,7 @@ const opListParts = "ListParts" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { op := &request.Operation{ Name: opListParts, @@ -4151,7 +4712,7 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListParts for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { req, out := c.ListPartsRequest(input) return out, req.Send() @@ -4227,19 +4788,18 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketAccelerateConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAccelerateConfiguration method directly -// instead. +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketAccelerateConfigurationRequest method. 
// req, resp := client.PutBucketAccelerateConfigurationRequest(params) @@ -4249,7 +4809,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAccelerateConfiguration, @@ -4263,8 +4823,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC output = &PutBucketAccelerateConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4278,7 +4837,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAccelerateConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { req, out := c.PutBucketAccelerateConfigurationRequest(input) return out, req.Send() @@ -4304,19 +4863,18 @@ const opPutBucketAcl = "PutBucketAcl" // PutBucketAclRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketAcl for usage and error information. +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAcl method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketAclRequest method. 
// req, resp := client.PutBucketAclRequest(params) @@ -4326,7 +4884,7 @@ const opPutBucketAcl = "PutBucketAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { op := &request.Operation{ Name: opPutBucketAcl, @@ -4340,8 +4898,7 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request output = &PutBucketAclOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4355,7 +4912,7 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAcl for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { req, out := c.PutBucketAclRequest(input) return out, req.Send() @@ -4381,19 +4938,18 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketAnalyticsConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAnalyticsConfiguration method directly -// instead. +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. 
// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) @@ -4403,7 +4959,7 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAnalyticsConfiguration, @@ -4417,8 +4973,7 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon output = &PutBucketAnalyticsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4433,7 +4988,7 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { req, out := c.PutBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -4459,19 +5014,18 @@ const opPutBucketCors = "PutBucketCors" // PutBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the PutBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketCors for usage and error information. +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketCors method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketCorsRequest method. 
// req, resp := client.PutBucketCorsRequest(params) @@ -4481,7 +5035,7 @@ const opPutBucketCors = "PutBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { op := &request.Operation{ Name: opPutBucketCors, @@ -4495,14 +5049,13 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque output = &PutBucketCorsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketCors API operation for Amazon Simple Storage Service. // -// Sets the cors configuration for a bucket. +// Sets the CORS configuration for a bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4510,7 +5063,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { req, out := c.PutBucketCorsRequest(input) return out, req.Send() @@ -4532,111 +5085,186 @@ func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput return out, req.Send() } -const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" +const opPutBucketEncryption = "PutBucketEncryption" -// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketInventoryConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketInventoryConfiguration method directly -// instead. +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // -// // Example sending a request using the PutBucketInventoryConfigurationRequest method. -// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// +// // Example sending a request using the PutBucketEncryptionRequest method. +// req, resp := client.PutBucketEncryptionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration -func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { op := &request.Operation{ - Name: opPutBucketInventoryConfiguration, + Name: opPutBucketEncryption, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?inventory", + HTTPPath: "/{Bucket}?encryption", } if input == nil { - input = &PutBucketInventoryConfigurationInput{} + input = &PutBucketEncryptionInput{} } - output = &PutBucketInventoryConfigurationOutput{} + output = &PutBucketEncryptionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// PutBucketEncryption API operation for Amazon Simple Storage Service. // -// Adds an inventory configuration (identified by the inventory ID) from the -// bucket. +// Creates a new server-side encryption configuration (or replaces an existing +// one, if present). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration -func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { - req, out := c.PutBucketInventoryConfigurationRequest(input) +// API operation PutBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) return out, req.Send() } -// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of // the ability to pass a context and additional request options. // -// See PutBucketInventoryConfiguration for details on how to use this API operation. +// See PutBucketEncryption for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
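PutBucketEncryption is one of the operations this patch adds outright. A hedged sketch of calling its WithContext variant, combining the non-nil-context requirement described here with a default AES-256 rule (the bucket name and timeout are placeholders):

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	client := s3.New(sess)

	// The context must be non-nil; it is used for request cancellation.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Default server-side encryption: AES-256 for all new objects.
	_, err := client.PutBucketEncryptionWithContext(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}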
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { - req, out := c.PutBucketInventoryConfigurationRequest(input) +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutBucketLifecycle = "PutBucketLifecycle" +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" -// PutBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketLifecycle for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLifecycle method directly -// instead. +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the PutBucketLifecycleRequest method. -// req, resp := client.PutBucketLifecycleRequest(params) +// +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. 
+// req, resp := client.PutBucketInventoryConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketInventoryConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &PutBucketInventoryConfigurationInput{} + } + + output = &PutBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Adds an inventory configuration (identified by the inventory ID) to the +// bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleRequest method.
+// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") @@ -4653,8 +5281,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req output = &PutBucketLifecycleOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4668,7 +5295,9 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { req, out := c.PutBucketLifecycleRequest(input) return out, req.Send() @@ -4683,6 +5312,8 @@ func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifec // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. +// +// Deprecated: PutBucketLifecycleWithContext has been deprecated func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { req, out := c.PutBucketLifecycleRequest(input) req.SetContext(ctx) @@ -4694,19 +5325,18 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketLifecycleConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLifecycleConfiguration method directly -// instead. +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. 
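Since this patch marks PutBucketLifecycle as deprecated while documenting PutBucketLifecycleConfiguration alongside it, a sketch of the replacement call; the bucket name, rule ID, prefix, and 30-day expiration are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	client := s3.New(sess)

	// Prefer PutBucketLifecycleConfiguration; the deprecated
	// PutBucketLifecycle now logs a warning through the SDK logger.
	_, err := client.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-logs"), // placeholder
				Status:     aws.String("Enabled"),
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}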
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketLifecycleConfigurationRequest method. // req, resp := client.PutBucketLifecycleConfigurationRequest(params) @@ -4716,7 +5346,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opPutBucketLifecycleConfiguration, @@ -4730,8 +5360,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon output = &PutBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4746,7 +5375,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLifecycleConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { req, out := c.PutBucketLifecycleConfigurationRequest(input) return out, req.Send() @@ -4772,19 +5401,18 @@ const opPutBucketLogging = "PutBucketLogging" // PutBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLogging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketLogging for usage and error information. +// See PutBucketLogging for more information on using the PutBucketLogging +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLogging method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
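A recurring mechanical change in these hunks replaces the Remove-then-PushBackNamed pair with a single Handlers.Unmarshal.Swap call. A standalone sketch of the HandlerList semantics involved, using made-up handler names:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var l request.HandlerList
	noop := func(*request.Request) {}
	l.PushBackNamed(request.NamedHandler{Name: "validate", Fn: noop})
	l.PushBackNamed(request.NamedHandler{Name: "restxml.Unmarshal", Fn: noop})
	l.PushBackNamed(request.NamedHandler{Name: "finalize", Fn: noop})

	// The old pattern (Remove, then PushBackNamed) appended the
	// replacement at the end of the list; Swap replaces the named
	// handler in place and reports whether it was found.
	swapped := l.Swap("restxml.Unmarshal", request.NamedHandler{
		Name: "protocol.UnmarshalDiscardBody",
		Fn:   noop,
	})
	fmt.Println("swapped:", swapped, "handlers:", l.Len())
}

Swap returns false when no handler with that name is registered; the generated code in this file discards the return value.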
// // // Example sending a request using the PutBucketLoggingRequest method. // req, resp := client.PutBucketLoggingRequest(params) @@ -4794,7 +5422,7 @@ const opPutBucketLogging = "PutBucketLogging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { op := &request.Operation{ Name: opPutBucketLogging, @@ -4808,8 +5436,7 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request output = &PutBucketLoggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4825,7 +5452,7 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLogging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { req, out := c.PutBucketLoggingRequest(input) return out, req.Send() @@ -4851,19 +5478,18 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketMetricsConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketMetricsConfiguration method directly -// instead. +// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketMetricsConfigurationRequest method. 
// req, resp := client.PutBucketMetricsConfigurationRequest(params) @@ -4873,7 +5499,7 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opPutBucketMetricsConfiguration, @@ -4887,8 +5513,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu output = &PutBucketMetricsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4903,7 +5528,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { req, out := c.PutBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -4929,19 +5554,18 @@ const opPutBucketNotification = "PutBucketNotification" // PutBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketNotification operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketNotification for usage and error information. +// See PutBucketNotification for more information on using the PutBucketNotification +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketNotification method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketNotificationRequest method. 
// req, resp := client.PutBucketNotificationRequest(params) @@ -4951,7 +5575,9 @@ const opPutBucketNotification = "PutBucketNotification" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") @@ -4968,8 +5594,7 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re output = &PutBucketNotificationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4983,7 +5608,9 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketNotification for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { req, out := c.PutBucketNotificationRequest(input) return out, req.Send() @@ -4998,6 +5625,8 @@ func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucke // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. +// +// Deprecated: PutBucketNotificationWithContext has been deprecated func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { req, out := c.PutBucketNotificationRequest(input) req.SetContext(ctx) @@ -5009,19 +5638,18 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration // PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketNotificationConfiguration for usage and error information. +// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the PutBucketNotificationConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketNotificationConfigurationRequest method. // req, resp := client.PutBucketNotificationConfigurationRequest(params) @@ -5031,7 +5659,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { op := &request.Operation{ Name: opPutBucketNotificationConfiguration, @@ -5045,8 +5673,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat output = &PutBucketNotificationConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5060,7 +5687,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketNotificationConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { req, out := c.PutBucketNotificationConfigurationRequest(input) return out, req.Send() @@ -5086,19 +5713,18 @@ const opPutBucketPolicy = "PutBucketPolicy" // PutBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the PutBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketPolicy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketPolicy method directly -// instead. +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketPolicyRequest method. // req, resp := client.PutBucketPolicyRequest(params) @@ -5108,7 +5734,7 @@ const opPutBucketPolicy = "PutBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { op := &request.Operation{ Name: opPutBucketPolicy, @@ -5122,8 +5748,7 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R output = &PutBucketPolicyOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5138,7 +5763,7 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { req, out := c.PutBucketPolicyRequest(input) return out, req.Send() @@ -5164,19 +5789,18 @@ const opPutBucketReplication = "PutBucketReplication" // PutBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketReplication for usage and error information. +// See PutBucketReplication for more information on using the PutBucketReplication +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketReplication method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketReplicationRequest method. 
// req, resp := client.PutBucketReplicationRequest(params) @@ -5186,7 +5810,7 @@ const opPutBucketReplication = "PutBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { op := &request.Operation{ Name: opPutBucketReplication, @@ -5200,15 +5824,15 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req output = &PutBucketReplicationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketReplication API operation for Amazon Simple Storage Service. // -// Creates a new replication configuration (or replaces an existing one, if -// present). +// Creates a replication configuration or replaces an existing one. For more +// information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// in the Amazon S3 Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5216,7 +5840,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketReplication for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { req, out := c.PutBucketReplicationRequest(input) return out, req.Send() @@ -5242,19 +5866,18 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment" // PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the PutBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketRequestPayment for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketRequestPayment method directly -// instead. +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
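The expanded PutBucketReplication comment above links to the CRR developer guide; a sketch of the configuration it refers to, assuming versioning is already enabled on both buckets (the role ARN and bucket names are placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	client := s3.New(sess)

	_, err := client.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("source-bucket"), // placeholder
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::123456789012:role/crr-role"), // placeholder
			Rules: []*s3.ReplicationRule{{
				Status: aws.String("Enabled"),
				Prefix: aws.String(""), // replicate the whole bucket
				Destination: &s3.Destination{
					Bucket: aws.String("arn:aws:s3:::destination-bucket"), // placeholder
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}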
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketRequestPaymentRequest method. // req, resp := client.PutBucketRequestPaymentRequest(params) @@ -5264,7 +5887,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { op := &request.Operation{ Name: opPutBucketRequestPayment, @@ -5278,8 +5901,7 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) output = &PutBucketRequestPaymentOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5297,7 +5919,7 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketRequestPayment for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { req, out := c.PutBucketRequestPaymentRequest(input) return out, req.Send() @@ -5323,19 +5945,18 @@ const opPutBucketTagging = "PutBucketTagging" // PutBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the PutBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketTagging for usage and error information. +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketTagging method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketTaggingRequest method. 
// req, resp := client.PutBucketTaggingRequest(params) @@ -5345,7 +5966,7 @@ const opPutBucketTagging = "PutBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { op := &request.Operation{ Name: opPutBucketTagging, @@ -5359,8 +5980,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request output = &PutBucketTaggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5374,7 +5994,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { req, out := c.PutBucketTaggingRequest(input) return out, req.Send() @@ -5400,19 +6020,18 @@ const opPutBucketVersioning = "PutBucketVersioning" // PutBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the PutBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketVersioning for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketVersioning method directly -// instead. +// See PutBucketVersioning for more information on using the PutBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketVersioningRequest method. 
// req, resp := client.PutBucketVersioningRequest(params) @@ -5422,7 +6041,7 @@ const opPutBucketVersioning = "PutBucketVersioning" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { op := &request.Operation{ Name: opPutBucketVersioning, @@ -5436,8 +6055,7 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r output = &PutBucketVersioningOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5452,7 +6070,7 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketVersioning for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { req, out := c.PutBucketVersioningRequest(input) return out, req.Send() @@ -5478,19 +6096,18 @@ const opPutBucketWebsite = "PutBucketWebsite" // PutBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the PutBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketWebsite for usage and error information. +// See PutBucketWebsite for more information on using the PutBucketWebsite +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketWebsite method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketWebsiteRequest method. 
// req, resp := client.PutBucketWebsiteRequest(params) @@ -5500,7 +6117,7 @@ const opPutBucketWebsite = "PutBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { op := &request.Operation{ Name: opPutBucketWebsite, @@ -5514,8 +6131,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request output = &PutBucketWebsiteOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5529,7 +6145,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { req, out := c.PutBucketWebsiteRequest(input) return out, req.Send() @@ -5555,19 +6171,18 @@ const opPutObject = "PutObject" // PutObjectRequest generates a "aws/request.Request" representing the // client's request for the PutObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObject method directly -// instead. +// See PutObject for more information on using the PutObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutObjectRequest method. 
// req, resp := client.PutObjectRequest(params) @@ -5577,7 +6192,7 @@ const opPutObject = "PutObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { op := &request.Operation{ Name: opPutObject, @@ -5604,7 +6219,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { req, out := c.PutObjectRequest(input) return out, req.Send() @@ -5630,19 +6245,18 @@ const opPutObjectAcl = "PutObjectAcl" // PutObjectAclRequest generates a "aws/request.Request" representing the // client's request for the PutObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutObjectAcl for usage and error information. +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObjectAcl method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutObjectAclRequest method. // req, resp := client.PutObjectAclRequest(params) @@ -5652,7 +6266,7 @@ const opPutObjectAcl = "PutObjectAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { op := &request.Operation{ Name: opPutObjectAcl, @@ -5685,7 +6299,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. 
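// Acting on those error codes uses the runtime type assertion described above; a hedged
// sketch ("client" and the bucket/key values are illustrative):
//
//    _, err := client.PutObjectAcl(&s3.PutObjectAclInput{
//        Bucket: aws.String("example-bucket"),
//        Key:    aws.String("example-key"),
//        ACL:    aws.String(s3.ObjectCannedACLPublicRead),
//    })
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
//        fmt.Println("missing object:", aerr.Message())
//    }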
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { req, out := c.PutObjectAclRequest(input) return out, req.Send() @@ -5707,501 +6321,2691 @@ func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, return out, req.Send() } -const opPutObjectTagging = "PutObjectTagging" +const opPutObjectLegalHold = "PutObjectLegalHold" -// PutObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See PutObjectTagging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObjectTagging method directly -// instead. +// See PutObjectLegalHold for more information on using the PutObjectLegalHold +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the PutObjectTaggingRequest method. -// req, resp := client.PutObjectTaggingRequest(params) +// +// // Example sending a request using the PutObjectLegalHoldRequest method. +// req, resp := client.PutObjectLegalHoldRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { op := &request.Operation{ - Name: opPutObjectTagging, + Name: opPutObjectLegalHold, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?tagging", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", } if input == nil { - input = &PutObjectTaggingInput{} + input = &PutObjectLegalHoldInput{} } - output = &PutObjectTaggingOutput{} + output = &PutObjectLegalHoldOutput{} req = c.newRequest(op, input, output) return } -// PutObjectTagging API operation for Amazon Simple Storage Service. +// PutObjectLegalHold API operation for Amazon Simple Storage Service. // -// Sets the supplied tag-set to an object that already exists in a bucket +// Applies a Legal Hold configuration to the specified object. 
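// A minimal sketch of applying a Legal Hold, assuming a bucket created with Object Lock
// enabled; the client, bucket, and key names are illustrative:
//
//    _, err := client.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
//        Bucket:    aws.String("example-lock-bucket"),
//        Key:       aws.String("example-key"),
//        LegalHold: &s3.ObjectLockLegalHold{Status: aws.String("ON")},
//    })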
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) +// API operation PutObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) return out, req.Send() } -// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of // the ability to pass a context and additional request options. // -// See PutObjectTagging for details on how to use this API operation. +// See PutObjectLegalHold for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) +func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRestoreObject = "RestoreObject" +const opPutObjectLockConfiguration = "PutObjectLockConfiguration" -// RestoreObjectRequest generates a "aws/request.Request" representing the -// client's request for the RestoreObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See RestoreObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RestoreObject method directly -// instead. +// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
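// The *WithContext variants above accept anything satisfying aws.Context, which a
// standard context.Context does on Go 1.7+; a hedged timeout sketch, where "input" is a
// PutObjectLegalHoldInput like the one sketched earlier and all names are illustrative:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    _, err := client.PutObjectLegalHoldWithContext(ctx, input)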
+// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the RestoreObjectRequest method. -// req, resp := client.RestoreObjectRequest(params) +// +// // Example sending a request using the PutObjectLockConfigurationRequest method. +// req, resp := client.PutObjectLockConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { op := &request.Operation{ - Name: opRestoreObject, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?restore", + Name: opPutObjectLockConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?object-lock", } if input == nil { - input = &RestoreObjectInput{} + input = &PutObjectLockConfigurationInput{} } - output = &RestoreObjectOutput{} + output = &PutObjectLockConfigurationOutput{} req = c.newRequest(op, input, output) return } -// RestoreObject API operation for Amazon Simple Storage Service. +// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Restores an archived copy of an object back into Amazon S3 +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new +// object placed in the specified bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation RestoreObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) +// API operation PutObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) return out, req.Send() } -// RestoreObjectWithContext is the same as RestoreObject with the addition of +// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of // the ability to pass a context and additional request options. // -// See RestoreObject for details on how to use this API operation. +// See PutObjectLockConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) +func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUploadPart = "UploadPart" +const opPutObjectRetention = "PutObjectRetention" -// UploadPartRequest generates a "aws/request.Request" representing the -// client's request for the UploadPart operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// PutObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See UploadPart for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UploadPart method directly -// instead. +// See PutObjectRetention for more information on using the PutObjectRetention +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the UploadPartRequest method. -// req, resp := client.UploadPartRequest(params) +// +// // Example sending a request using the PutObjectRetentionRequest method. +// req, resp := client.PutObjectRetentionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart -func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { op := &request.Operation{ - Name: opUploadPart, + Name: opPutObjectRetention, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", + HTTPPath: "/{Bucket}/{Key+}?retention", } if input == nil { - input = &UploadPartInput{} + input = &PutObjectRetentionInput{} } - output = &UploadPartOutput{} + output = &PutObjectRetentionOutput{} req = c.newRequest(op, input, output) return } -// UploadPart API operation for Amazon Simple Storage Service. -// -// Uploads a part in a multipart upload. +// PutObjectRetention API operation for Amazon Simple Storage Service. 
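// For the PutObjectLockConfigurationRequest plumbing above, a hedged sketch of setting a
// bucket-default retention rule (the client, bucket, mode, and day count are illustrative):
//
//    _, err := client.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
//        Bucket: aws.String("example-lock-bucket"),
//        ObjectLockConfiguration: &s3.ObjectLockConfiguration{
//            ObjectLockEnabled: aws.String("Enabled"),
//            Rule: &s3.ObjectLockRule{
//                DefaultRetention: &s3.DefaultRetention{
//                    Mode: aws.String("GOVERNANCE"),
//                    Days: aws.Int64(30),
//                },
//            },
//        },
//    })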
// -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging -// you for the parts storage. +// Places an Object Retention configuration on an object. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation UploadPart for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart -func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { - req, out := c.UploadPartRequest(input) +// API operation PutObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) return out, req.Send() } -// UploadPartWithContext is the same as UploadPart with the addition of +// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of // the ability to pass a context and additional request options. // -// See UploadPart for details on how to use this API operation. +// See PutObjectRetention for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { - req, out := c.UploadPartRequest(input) +func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUploadPartCopy = "UploadPartCopy" +const opPutObjectTagging = "PutObjectTagging" -// UploadPartCopyRequest generates a "aws/request.Request" representing the -// client's request for the UploadPartCopy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See UploadPartCopy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UploadPartCopy method directly -// instead. 
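// Complementing the bucket-level default sketched earlier, a hedged sketch of the
// per-object PutObjectRetention operation (all names and the 90-day window are
// illustrative):
//
//    _, err := client.PutObjectRetention(&s3.PutObjectRetentionInput{
//        Bucket: aws.String("example-lock-bucket"),
//        Key:    aws.String("example-key"),
//        Retention: &s3.ObjectLockRetention{
//            Mode:            aws.String("GOVERNANCE"),
//            RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 90)),
//        },
//    })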
+// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the UploadPartCopyRequest method. -// req, resp := client.UploadPartCopyRequest(params) +// +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy -func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { op := &request.Operation{ - Name: opUploadPartCopy, + Name: opPutObjectTagging, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", + HTTPPath: "/{Bucket}/{Key+}?tagging", } if input == nil { - input = &UploadPartCopyInput{} + input = &PutObjectTaggingInput{} } - output = &UploadPartCopyOutput{} + output = &PutObjectTaggingOutput{} req = c.newRequest(op, input, output) return } -// UploadPartCopy API operation for Amazon Simple Storage Service. +// PutObjectTagging API operation for Amazon Simple Storage Service. // -// Uploads a part by copying data from an existing object as data source. +// Sets the supplied tag-set to an object that already exists in a bucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation UploadPartCopy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy -func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { - req, out := c.UploadPartCopyRequest(input) +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) return out, req.Send() } -// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of // the ability to pass a context and additional request options. // -// See UploadPartCopy for details on how to use this API operation. +// See PutObjectTagging for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
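// A minimal sketch of replacing an object's tag-set as described above (the bucket, key,
// and tag values are illustrative):
//
//    _, err := client.PutObjectTagging(&s3.PutObjectTaggingInput{
//        Bucket: aws.String("example-bucket"),
//        Key:    aws.String("example-key"),
//        Tagging: &s3.Tagging{TagSet: []*s3.Tag{
//            {Key: aws.String("env"), Value: aws.String("staging")},
//        }},
//    })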
-func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { - req, out := c.UploadPartCopyRequest(input) +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// Specifies the days since the initiation of an Incomplete Multipart Upload -// that Lifecycle will wait before permanently removing all parts of the upload. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload -type AbortIncompleteMultipartUpload struct { - _ struct{} `type:"structure"` +const opPutPublicAccessBlock = "PutPublicAccessBlock" - // Indicates the number of days that must pass since initiation for Lifecycle - // to abort an Incomplete Multipart Upload. - DaysAfterInitiation *int64 `type:"integer"` -} +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPublicAccessBlockRequest method. +// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?publicAccessBlock", + } -// String returns the string representation -func (s AbortIncompleteMultipartUpload) String() string { - return awsutil.Prettify(s) + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return } -// GoString returns the string representation -func (s AbortIncompleteMultipartUpload) GoString() string { - return s.String() +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutPublicAccessBlock for usage and error information. 
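// A hedged sketch of blocking all four classes of public access with the operation
// documented above (the client and bucket names are illustrative):
//
//    _, err := client.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
//        Bucket: aws.String("example-bucket"),
//        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
//            BlockPublicAcls:       aws.Bool(true),
//            BlockPublicPolicy:     aws.Bool(true),
//            IgnorePublicAcls:      aws.Bool(true),
//            RestrictPublicBuckets: aws.Bool(true),
//        },
//    })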
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() } -// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. -func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { - s.DaysAfterInitiation = &v - return s +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest -type AbortMultipartUploadInput struct { - _ struct{} `type:"structure"` +const opRestoreObject = "RestoreObject" - // Bucket is a required field +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreObject for more information on using the RestoreObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation RestoreObject for usage and error information. 
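// A minimal RestoreObject sketch, keeping the restored copy available for two days via
// the Standard retrieval tier (names and durations are illustrative):
//
//    _, err := client.RestoreObject(&s3.RestoreObjectInput{
//        Bucket: aws.String("example-bucket"),
//        Key:    aws.String("archived-key"),
//        RestoreRequest: &s3.RestoreRequest{
//            Days:                 aws.Int64(2),
//            GlacierJobParameters: &s3.GlacierJobParameters{Tier: aws.String("Standard")},
//        },
//    })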
+// +// Returned Error Codes: +// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This operation is not allowed against this storage tier +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSelectObjectContent = "SelectObjectContent" + +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop) + return +} + +// SelectObjectContent API operation for Amazon Simple Storage Service. +// +// This operation filters the contents of an Amazon S3 object based on a simple +// Structured Query Language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON or +// CSV) of the object. Amazon S3 uses this to parse object data into records, +// and returns only records that match the specified SQL expression. You must +// also specify the data serialization format for the response. +// +// Returns awserr.Error for service API and SDK errors. 
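// Because SelectObjectContent answers with an event stream (note the handler swaps in the
// request constructor above), records are read from the output's EventStream rather than
// a plain body; a hedged sketch with an illustrative CSV query:
//
//    out, err := client.SelectObjectContent(&s3.SelectObjectContentInput{
//        Bucket:              aws.String("example-bucket"),
//        Key:                 aws.String("data.csv"),
//        Expression:          aws.String("SELECT s._1 FROM S3Object s"),
//        ExpressionType:      aws.String(s3.ExpressionTypeSql),
//        InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
//        OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
//    })
//    if err == nil {
//        defer out.EventStream.Close()
//        for ev := range out.EventStream.Events() {
//            if rec, ok := ev.(*s3.RecordsEvent); ok {
//                fmt.Printf("%s", rec.Payload) // one batch of query results
//            }
//        }
//    }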
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation SelectObjectContent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + return out, req.Send() +} + +// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of +// the ability to pass a context and additional request options. +// +// See SelectObjectContent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPart for more information on using the UploadPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. 
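// Tying the charging note above together, a hedged sketch of one full multipart round
// trip that aborts on failure so orphaned parts do not keep accruing storage charges
// (the client, names, and single-part upload are illustrative):
//
//    mp, _ := client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
//        Bucket: aws.String("example-bucket"), Key: aws.String("big-object"),
//    })
//    part, err := client.UploadPart(&s3.UploadPartInput{
//        Bucket:     aws.String("example-bucket"),
//        Key:        aws.String("big-object"),
//        UploadId:   mp.UploadId,
//        PartNumber: aws.Int64(1),
//        Body:       bytes.NewReader([]byte("example part payload")),
//    })
//    if err != nil {
//        client.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
//            Bucket: aws.String("example-bucket"), Key: aws.String("big-object"),
//            UploadId: mp.UploadId,
//        })
//    } else {
//        client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
//            Bucket:   aws.String("example-bucket"),
//            Key:      aws.String("big-object"),
//            UploadId: mp.UploadId,
//            MultipartUpload: &s3.CompletedMultipartUpload{
//                Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
//            },
//        })
//    }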
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPartCopy for more information on using the UploadPartCopy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartCopyRequest method. +// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + output = &UploadPartCopyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPartCopy API operation for Amazon Simple Storage Service. +// +// Uploads a part by copying data from an existing object as data source. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
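// UploadPartCopy reuses the same multipart session; a hedged sketch copying the first
// 5 MiB of an existing object as part 2, where mp.UploadId comes from a prior
// CreateMultipartUpload and all names are illustrative:
//
//    _, err := client.UploadPartCopy(&s3.UploadPartCopyInput{
//        Bucket:          aws.String("example-bucket"),
//        Key:             aws.String("big-object"),
//        UploadId:        mp.UploadId,
//        PartNumber:      aws.Int64(2),
//        CopySource:      aws.String("example-bucket/existing-object"),
//        CopySourceRange: aws.String("bytes=0-5242879"),
//    })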
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Specifies the days since the initiation of an Incomplete Multipart Upload +// that Lifecycle will wait before permanently removing all parts of the upload. +type AbortIncompleteMultipartUpload struct { + _ struct{} `type:"structure"` + + // Indicates the number of days that must pass since initiation for Lifecycle + // to abort an Incomplete Multipart Upload. + DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + +// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. +func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +type AbortMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. 
+func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +// A container for information about access control for replicas. +type AccessControlTranslation struct { + _ struct{} `type:"structure"` + + // The override value for the owner of the replica object. 
+ // + // Owner is a required field + Owner *string `type:"string" required:"true" enum:"OwnerOverride"` +} + +// String returns the string representation +func (s AccessControlTranslation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlTranslation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlTranslation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { + s.Owner = &v + return s +} + +type AnalyticsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix to use when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags to use when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s AnalyticsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // If present, it indicates that data related to access patterns will be collected + // and made available to analyze the tradeoffs between different storage classes. + // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
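Before the Validate implementation that follows, a short hypothetical sketch of the fluent-setter pattern these generated structs share. The configuration ID and prefix are invented values, and the missing StorageClassAnalysis is deliberate, to show the kind of error Validate surfaces before a request is ever signed.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Every setter returns the receiver, so configurations chain naturally.
	cfg := (&s3.AnalyticsConfiguration{}).
		SetId("docs-analysis"). // placeholder ID
		SetFilter((&s3.AnalyticsFilter{}).SetPrefix("docs/"))

	// StorageClassAnalysis is required but unset, so this reports an
	// invalid-parameter error rather than failing at request time.
	if err := cfg.Validate(); err != nil {
		fmt.Println(err)
	}
}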
+func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. + And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. + Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
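The AnalyticsFilter doc comment above says a filter must carry exactly one predicate: a Prefix, a Tag, or an And conjunction. A hedged sketch of the one non-obvious case, matching a prefix and tags together, which requires pushing both inside the AnalyticsAndOperator (the tag key and values are made up):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Prefix and Tags live inside the And operator, not on the filter
	// itself, so the filter still carries a single predicate.
	filter := (&s3.AnalyticsFilter{}).SetAnd((&s3.AnalyticsAndOperator{}).
		SetPrefix("logs/").
		SetTags([]*s3.Tag{{
			Key:   aws.String("team"),  // placeholder tag
			Value: aws.String("infra"), // placeholder value
		}}))
	fmt.Println(filter.Validate()) // <nil>: the nested tag has both Key and Value
}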
+func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon resource name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination bucket. If no account ID is provided, + // the owner will not be validated prior to exporting data. + BucketAccountId *string `type:"string"` + + // The file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The exported data begins with this + // prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *AnalyticsS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketAccountId sets the BucketAccountId field's value. +func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { + s.BucketAccountId = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { + s.Prefix = &v + return s +} + +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp"` + + // The name of the bucket. 
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Bucket) SetCreationDate(v time.Time) *Bucket { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + // Container for logging information. Presence of this element indicates that + // logging is enabled. Parameters TargetBucket and TargetPrefix are required + // in this case. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. +func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. +func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Describes how a CSV-formatted input object is formatted. +type CSVInput struct { + _ struct{} `type:"structure"` + + // Specifies that CSV field values may contain quoted record delimiters and + // such records should be allowed. 
Default value is FALSE. Setting this value + // to TRUE may lower performance. + AllowQuotedRecordDelimiter *bool `type:"boolean"` + + // The single character used to indicate a row should be ignored when present + // at the start of a row. + Comments *string `type:"string"` + + // The value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // Describes the first line of input. Valid values: None, Ignore, Use. + FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` + + // Value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // The single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // The value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVInput) GoString() string { + return s.String() +} + +// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value. +func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput { + s.AllowQuotedRecordDelimiter = &v + return s +} + +// SetComments sets the Comments field's value. +func (s *CSVInput) SetComments(v string) *CSVInput { + s.Comments = &v + return s +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput { + s.FieldDelimiter = &v + return s +} + +// SetFileHeaderInfo sets the FileHeaderInfo field's value. +func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput { + s.FileHeaderInfo = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { + s.RecordDelimiter = &v + return s +} + +// Describes how CSV-formatted results are formatted. +type CSVOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // The value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // The single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether or not all output fields should be quoted. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // The value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value.
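CSVInput and CSVOutput describe serialization for S3 Select; elsewhere in this package they are typically carried by SelectObjectContentInput's InputSerialization and OutputSerialization. A small sketch of building a CSVInput, assuming the delimiters shown are what the stored object actually uses (they are illustrative, not SDK defaults):

package main

import (
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// FileHeaderInfo "USE" makes header names addressable by name in the
	// Select SQL expression rather than by column ordinal.
	input := (&s3.CSVInput{}).
		SetFileHeaderInfo(s3.FileHeaderInfoUse).
		SetFieldDelimiter(",").
		SetRecordDelimiter("\n")
	_ = input // would be assigned to InputSerialization.CSV on a Select request
}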
+func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. +func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. +func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { + s.VersionId = &v + return s +} + +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +// SetParts sets the Parts field's value. +func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { + s.Parts = v + return s +} + +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. 
+ PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CompletedPart) SetETag(v string) *CompletedPart { + s.ETag = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { + s.PartNumber = &v + return s +} + +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string `type:"string"` + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect requests for all pages with the prefix docs/, the key prefix will + // be /docs, which identifies all objects in the docs/ folder. Required when + // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. If both conditions are specified, both must be true for + // the redirect to be applied. + KeyPrefixEquals *string `type:"string"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value. +func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition { + s.HttpErrorCodeReturnedEquals = &v + return s +} + +// SetKeyPrefixEquals sets the KeyPrefixEquals field's value. +func (s *Condition) SetKeyPrefixEquals(v string) *Condition { + s.KeyPrefixEquals = &v + return s +} + +type ContinuationEvent struct { + _ struct{} `locationName:"ContinuationEvent" type:"structure"` +} + +// String returns the string representation +func (s ContinuationEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinuationEvent) GoString() string { + return s.String() +} + +// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events. +func (s *ContinuationEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *ContinuationEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + return nil +} + +type CopyObjectInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. 
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether the metadata is copied from the source object or replaced + // with metadata provided in the request. + MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + + // Specifies whether you want to apply a Legal Hold to the copied object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to the copied object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want the copied object's Object Lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object.
Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the destination object. This value must be used in + // conjunction with the TaggingDirective. The tag-set must be encoded as URL + // query parameters. + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // Specifies whether the object tag-set is copied from the source object or + // replaced with the tag-set provided in the request. + TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { + s.Bucket = &v + return s +} + +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { + s.ContentType = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
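To ground the large CopyObjectInput type above, here is a minimal hypothetical sketch of a server-side copy that replaces the object's metadata; the bucket and key names are placeholders, and error handling is reduced to a fatal log.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// CopySource is "source-bucket/source-key", URL-encoded. The REPLACE
	// directive tells S3 to take metadata from this request instead of
	// carrying over the source object's metadata.
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:            aws.String("dest-bucket"), // placeholder
		Key:               aws.String("dest-key"),    // placeholder
		CopySource:        aws.String("source-bucket/source-key"),
		MetadataDirective: aws.String(s3.MetadataDirectiveReplace),
		Metadata:          map[string]*string{"reviewed": aws.String("true")},
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.CopyObjectResult != nil {
		log.Println("copied, etag:", aws.StringValue(out.CopyObjectResult.ETag))
	}
}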
+func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. 
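The four CopySourceIf* setters above mirror HTTP conditional requests: copy only if the source still matches a known ETag, or has not changed since a given time. A brief sketch, with a made-up ETag and an arbitrary cutoff:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Guard the copy: proceed only if the source object still has this
	// ETag and was not modified in the last 24 hours (placeholder values).
	input := (&s3.CopyObjectInput{}).
		SetBucket("dest-bucket").
		SetKey("dest-key").
		SetCopySource("source-bucket/source-key").
		SetCopySourceIfMatch(`"9b2cf535f27731c974343645a3985328"`).
		SetCopySourceIfUnmodifiedSince(time.Now().Add(-24 * time.Hour))
	fmt.Println(input.Validate()) // <nil>: Bucket, Key, and CopySource are all set
}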
+func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } // String returns the string representation -func (s AbortMultipartUploadInput) String() string { +func (s CopyObjectOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AbortMultipartUploadInput) GoString() string { +func (s CopyObjectOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AbortMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } +// SetCopyObjectResult sets the CopyObjectResult field's value. +func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { - s.Bucket = &v +// SetExpiration sets the Expiration field's value. +func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
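One non-obvious point about CopyObjectOutput, worth a small sketch under the assumption of a successful copy: the ETag and LastModified of the new object live in the nested CopyObjectResult (the XML payload), while versioning and encryption details come back as top-level header-mapped fields.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// describeCopy prints the pieces of a copy result a caller usually wants;
// the nested payload can be nil on some error paths, so guard it.
func describeCopy(out *s3.CopyObjectOutput) {
	if out.CopyObjectResult != nil {
		fmt.Println("etag:", aws.StringValue(out.CopyObjectResult.ETag))
	}
	fmt.Println("version:", aws.StringValue(out.VersionId))
	fmt.Println("sse:", aws.StringValue(out.ServerSideEncryption))
}

func main() {} // wiring into a real CopyObject call is elided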
+func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v return s } -// SetKey sets the Key field's value. -func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { - s.Key = &v - return s +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() } -// SetRequestPayer sets the RequestPayer field's value. -func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { - s.RequestPayer = &v +// SetETag sets the ETag field's value. +func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v return s } -// SetUploadId sets the UploadId field's value. -func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { - s.UploadId = &v +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput -type AbortMultipartUploadOutput struct { +type CopyPartResult struct { _ struct{} `type:"structure"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp"` } // String returns the string representation -func (s AbortMultipartUploadOutput) String() string { +func (s CopyPartResult) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AbortMultipartUploadOutput) GoString() string { +func (s CopyPartResult) GoString() string { return s.String() } -// SetRequestCharged sets the RequestCharged field's value. -func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { - s.RequestCharged = &v +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration -type AccelerateConfiguration struct { +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +type CreateBucketConfiguration struct { _ struct{} `type:"structure"` - // The accelerate configuration of the bucket. - Status *string `type:"string" enum:"BucketAccelerateStatus"` + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. 
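+	//
+	// A minimal usage sketch of creating a bucket with this constraint, assuming
+	// an already-configured session; the bucket name and region below are
+	// hypothetical, not part of this patch:
+	//
+	//	svc := s3.New(session.Must(session.NewSession()))
+	//	_, err := svc.CreateBucket(&s3.CreateBucketInput{
+	//		Bucket: aws.String("example-bucket"), // hypothetical name
+	//		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+	//			LocationConstraint: aws.String("eu-west-1"),
+	//		},
+	//	})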
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } // String returns the string representation -func (s AccelerateConfiguration) String() string { +func (s CreateBucketConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AccelerateConfiguration) GoString() string { +func (s CreateBucketConfiguration) GoString() string { return s.String() } -// SetStatus sets the Status field's value. -func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { - s.Status = &v +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy -type AccessControlPolicy struct { - _ struct{} `type:"structure"` +type CreateBucketInput struct { + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - Owner *Owner `type:"structure"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` } // String returns the string representation -func (s AccessControlPolicy) String() string { +func (s CreateBucketInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AccessControlPolicy) GoString() string { +func (s CreateBucketInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *AccessControlPolicy) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} - if s.Grants != nil { - for i, v := range s.Grants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) - } - } +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -6210,120 +9014,212 @@ func (s *AccessControlPolicy) Validate() error { return nil } -// SetGrants sets the Grants field's value. -func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { - s.Grants = v +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v return s } -// SetOwner sets the Owner field's value. -func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { - s.Owner = v +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator -type AnalyticsAndOperator struct { - _ struct{} `type:"structure"` +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // The prefix to use when evaluating an AND predicate. - Prefix *string `type:"string"` +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} - // The list of tags to use when evaluating an AND predicate. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { + s.GrantWriteACP = &v + return s +} + +// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. 
+func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { + s.ObjectLockEnabledForBucket = &v + return s +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string"` } // String returns the string representation -func (s AnalyticsAndOperator) String() string { +func (s CreateBucketOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AnalyticsAndOperator) GoString() string { +func (s CreateBucketOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLocation sets the Location field's value. +func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { + s.Location = &v + return s } -// SetPrefix sets the Prefix field's value. -func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { - s.Prefix = &v - return s -} +type CreateMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. 
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Specifies whether you want to apply a Legal Hold to the uploaded object.
+	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+	// Specifies the Object Lock mode that you want to apply to the uploaded object.
+	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+	// Specifies the date and time when you want the Object Lock to expire.
+	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
-// SetTags sets the Tags field's value.
-func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator {
-	s.Tags = v
-	return s
-}
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration
-type AnalyticsConfiguration struct {
-	_ struct{} `type:"structure"`
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. Documentation on configuring any of the officially supported
+	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
-	// The filter used to describe a set of objects for analyses. A filter must
-	// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator).
-	// If no filter is provided, all objects will be considered in any analysis.
- Filter *AnalyticsFilter `type:"structure"` + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // The identifier used to represent an analytics configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // If present, it indicates that data related to access patterns will be collected - // and made available to analyze the tradeoffs between different storage classes. - // - // StorageClassAnalysis is a required field - StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` + // The tag-set for the object. The tag-set must be encoded as URL Query parameters + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } // String returns the string representation -func (s AnalyticsConfiguration) String() string { +func (s CreateMultipartUploadInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AnalyticsConfiguration) GoString() string { +func (s CreateMultipartUploadInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) +func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.StorageClassAnalysis == nil { - invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) } - if s.StorageClassAnalysis != nil { - if err := s.StorageClassAnalysis.Validate(); err != nil { - invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) - } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -6332,353 +9228,373 @@ func (s *AnalyticsConfiguration) Validate() error { return nil } -// SetFilter sets the Filter field's value. -func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { - s.Filter = v +// SetACL sets the ACL field's value. +func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { + s.ACL = &v return s } -// SetId sets the Id field's value. 
-func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { - s.Id = &v +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v return s } -// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. -func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { - s.StorageClassAnalysis = v - return s +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination -type AnalyticsExportDestination struct { - _ struct{} `type:"structure"` - - // A destination signifying output to an S3 bucket. - // - // S3BucketDestination is a required field - S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s } -// String returns the string representation -func (s AnalyticsExportDestination) String() string { - return awsutil.Prettify(s) +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s } -// GoString returns the string representation -func (s AnalyticsExportDestination) GoString() string { - return s.String() +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsExportDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} - if s.S3BucketDestination == nil { - invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) - } - if s.S3BucketDestination != nil { - if err := s.S3BucketDestination.Validate(); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s } -// SetS3BucketDestination sets the S3BucketDestination field's value. -func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { - s.S3BucketDestination = v +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter -type AnalyticsFilter struct { - _ struct{} `type:"structure"` - - // A conjunction (logical AND) of predicates, which is used in evaluating an - // analytics filter. The operator must have at least two predicates. - And *AnalyticsAndOperator `type:"structure"` - - // The prefix to use when evaluating an analytics filter. - Prefix *string `type:"string"` - - // The tag to use when evaluating an analytics filter. 
- Tag *Tag `type:"structure"` +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s } -// String returns the string representation -func (s AnalyticsFilter) String() string { - return awsutil.Prettify(s) +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s } -// GoString returns the string representation -func (s AnalyticsFilter) GoString() string { - return s.String() +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s } -// SetAnd sets the And field's value. -func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { - s.And = v +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v return s } -// SetPrefix sets the Prefix field's value. -func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { - s.Prefix = &v +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v return s } -// SetTag sets the Tag field's value. -func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { - s.Tag = v +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination -type AnalyticsS3BucketDestination struct { - _ struct{} `type:"structure"` - - // The Amazon resource name (ARN) of the bucket to which data is exported. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { + s.ObjectLockLegalHoldStatus = &v + return s +} - // The account ID that owns the destination bucket. If no account ID is provided, - // the owner will not be validated prior to exporting data. - BucketAccountId *string `type:"string"` +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { + s.ObjectLockMode = &v + return s +} - // The file format used when exporting data to Amazon S3. 
- // - // Format is a required field - Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { + s.ObjectLockRetainUntilDate = &v + return s +} - // The prefix to use when exporting data. The exported data begins with this - // prefix. - Prefix *string `type:"string"` +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s } -// String returns the string representation -func (s AnalyticsS3BucketDestination) String() string { - return awsutil.Prettify(s) +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s } -// GoString returns the string representation -func (s AnalyticsS3BucketDestination) GoString() string { - return s.String() +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { + s.SSECustomerKey = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsS3BucketDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v } + return *s.SSECustomerKey +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { - s.Bucket = &v +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { + s.SSEKMSKeyId = &v return s } -// SetBucketAccountId sets the BucketAccountId field's value. -func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { - s.BucketAccountId = &v +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { + s.ServerSideEncryption = &v return s } -// SetFormat sets the Format field's value. -func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { - s.Format = &v +// SetStorageClass sets the StorageClass field's value. +func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { + s.StorageClass = &v return s } -// SetPrefix sets the Prefix field's value. -func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { - s.Prefix = &v +// SetTagging sets the Tagging field's value. 
+func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket -type Bucket struct { +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { + s.WebsiteRedirectLocation = &v + return s +} + +type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` - // Date the bucket was created. - CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // The name of the bucket. - Name *string `type:"string"` + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` } // String returns the string representation -func (s Bucket) String() string { +func (s CreateMultipartUploadOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Bucket) GoString() string { +func (s CreateMultipartUploadOutput) GoString() string { return s.String() } -// SetCreationDate sets the CreationDate field's value. -func (s *Bucket) SetCreationDate(v time.Time) *Bucket { - s.CreationDate = &v +// SetAbortDate sets the AbortDate field's value. +func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { + s.AbortDate = &v return s } -// SetName sets the Name field's value. -func (s *Bucket) SetName(v string) *Bucket { - s.Name = &v +// SetAbortRuleId sets the AbortRuleId field's value. 
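+//
+// A minimal usage sketch for obtaining this output, assuming a configured
+// *s3.S3 client named svc; the bucket and key names are hypothetical:
+//
+//	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//		Bucket:               aws.String("example-bucket"),
+//		Key:                  aws.String("example-key"),
+//		ServerSideEncryption: aws.String("AES256"),
+//	})
+//	if err == nil {
+//		// The returned UploadId is what UploadPart and
+//		// CompleteMultipartUpload calls must reference.
+//		_ = aws.StringValue(out.UploadId)
+//	}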
+func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { + s.AbortRuleId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration -type BucketLifecycleConfiguration struct { - _ struct{} `type:"structure"` +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} - // Rules is a required field - Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// String returns the string representation -func (s BucketLifecycleConfiguration) String() string { - return awsutil.Prettify(s) +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s } -// GoString returns the string representation -func (s BucketLifecycleConfiguration) GoString() string { - return s.String() +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *BucketLifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s } -// SetRules sets the Rules field's value. -func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { - s.Rules = v +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus -type BucketLoggingStatus struct { +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +// The container element for specifying the default Object Lock retention settings +// for new objects placed in the specified bucket. +type DefaultRetention struct { _ struct{} `type:"structure"` - LoggingEnabled *LoggingEnabled `type:"structure"` + // The number of days that you want to specify for the default retention period. 
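+	//
+	// A minimal usage sketch of applying a default retention rule through
+	// PutObjectLockConfiguration, assuming a configured *s3.S3 client named
+	// svc; the bucket name, mode, and day count are hypothetical:
+	//
+	//	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
+	//		Bucket: aws.String("example-bucket"),
+	//		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
+	//			ObjectLockEnabled: aws.String("Enabled"),
+	//			Rule: &s3.ObjectLockRule{
+	//				DefaultRetention: &s3.DefaultRetention{
+	//					Mode: aws.String("GOVERNANCE"),
+	//					Days: aws.Int64(30),
+	//				},
+	//			},
+	//		},
+	//	})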
+ Days *int64 `type:"integer"` + + // The default Object Lock retention mode you want to apply to new objects placed + // in the specified bucket. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The number of years that you want to specify for the default retention period. + Years *int64 `type:"integer"` } // String returns the string representation -func (s BucketLoggingStatus) String() string { +func (s DefaultRetention) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BucketLoggingStatus) GoString() string { +func (s DefaultRetention) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *BucketLoggingStatus) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} - if s.LoggingEnabled != nil { - if err := s.LoggingEnabled.Validate(); err != nil { - invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) - } - } +// SetDays sets the Days field's value. +func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { + s.Days = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMode sets the Mode field's value. +func (s *DefaultRetention) SetMode(v string) *DefaultRetention { + s.Mode = &v + return s } -// SetLoggingEnabled sets the LoggingEnabled field's value. -func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { - s.LoggingEnabled = v +// SetYears sets the Years field's value. +func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { + s.Years = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration -type CORSConfiguration struct { +type Delete struct { _ struct{} `type:"structure"` - // CORSRules is a required field - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` } // String returns the string representation -func (s CORSConfiguration) String() string { +func (s Delete) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CORSConfiguration) GoString() string { +func (s Delete) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
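+//
+// A minimal usage sketch of a quiet batch delete built on this type, assuming
+// a configured *s3.S3 client named svc; the bucket and keys are hypothetical:
+//
+//	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//		Bucket: aws.String("example-bucket"),
+//		Delete: &s3.Delete{
+//			Objects: []*s3.ObjectIdentifier{
+//				{Key: aws.String("obj-1")},
+//				{Key: aws.String("obj-2")},
+//			},
+//			// Quiet suppresses per-key success entries in the response.
+//			Quiet: aws.Bool(true),
+//		},
+//	})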
-func (s *CORSConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} - if s.CORSRules == nil { - invalidParams.Add(request.NewErrParamRequired("CORSRules")) +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) } - if s.CORSRules != nil { - for i, v := range s.CORSRules { + if s.Objects != nil { + for i, v := range s.Objects { if v == nil { continue } if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) } } } @@ -6689,58 +9605,53 @@ func (s *CORSConfiguration) Validate() error { return nil } -// SetCORSRules sets the CORSRules field's value. -func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { - s.CORSRules = v +// SetObjects sets the Objects field's value. +func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule -type CORSRule struct { - _ struct{} `type:"structure"` - - // Specifies which headers are allowed in a pre-flight OPTIONS request. - AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - - // Identifies HTTP methods that the domain/origin specified in the rule is allowed - // to execute. - // - // AllowedMethods is a required field - AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` +// SetQuiet sets the Quiet field's value. +func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} - // One or more origins you want customers to be able to access the bucket from. - // - // AllowedOrigins is a required field - AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure"` - // One or more headers in the response that you want customers to be able to - // access from their applications (for example, from a JavaScript XMLHttpRequest - // object). - ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + // The name of the bucket from which an analytics configuration is deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The time in seconds that your browser is to cache the preflight response - // for the specified resource. - MaxAgeSeconds *int64 `type:"integer"` + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } // String returns the string representation -func (s CORSRule) String() string { +func (s DeleteBucketAnalyticsConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CORSRule) GoString() string { +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CORSRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CORSRule"} - if s.AllowedMethods == nil { - invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.AllowedOrigins == nil { - invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } if invalidParams.Len() > 0 { @@ -6749,163 +9660,127 @@ func (s *CORSRule) Validate() error { return nil } -// SetAllowedHeaders sets the AllowedHeaders field's value. -func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { - s.AllowedHeaders = v +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v return s } -// SetAllowedMethods sets the AllowedMethods field's value. -func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { - s.AllowedMethods = v - return s +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetAllowedOrigins sets the AllowedOrigins field's value. -func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { - s.AllowedOrigins = v +// SetId sets the Id field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v return s } -// SetExposeHeaders sets the ExposeHeaders field's value. -func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { - s.ExposeHeaders = v - return s +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` } -// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. -func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { - s.MaxAgeSeconds = &v - return s +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration -type CloudFunctionConfiguration struct { - _ struct{} `type:"structure"` - - CloudFunction *string `type:"string"` - - // Bucket event for which to send notifications. - Event *string `deprecated:"true" type:"string" enum:"Event"` - - Events []*string `locationName:"Event" type:"list" flattened:"true"` +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. 
- Id *string `type:"string"` +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` - InvocationRole *string `type:"string"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s CloudFunctionConfiguration) String() string { +func (s DeleteBucketCorsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CloudFunctionConfiguration) GoString() string { +func (s DeleteBucketCorsInput) GoString() string { return s.String() } -// SetCloudFunction sets the CloudFunction field's value. -func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { - s.CloudFunction = &v - return s -} - -// SetEvent sets the Event field's value. -func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { - s.Event = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } -// SetEvents sets the Events field's value. -func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { - s.Events = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetId sets the Id field's value. -func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { - s.Id = &v +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v return s } -// SetInvocationRole sets the InvocationRole field's value. -func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { - s.InvocationRole = &v - return s +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix -type CommonPrefix struct { +type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` - - Prefix *string `type:"string"` } // String returns the string representation -func (s CommonPrefix) String() string { +func (s DeleteBucketCorsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CommonPrefix) GoString() string { +func (s DeleteBucketCorsOutput) GoString() string { return s.String() } -// SetPrefix sets the Prefix field's value. -func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { - s.Prefix = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest -type CompleteMultipartUploadInput struct { - _ struct{} `type:"structure" payload:"MultipartUpload"` +type DeleteBucketEncryptionInput struct { + _ struct{} `type:"structure"` + // The name of the bucket containing the server-side encryption configuration + // to delete. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } // String returns the string representation -func (s CompleteMultipartUploadInput) String() string { +func (s DeleteBucketEncryptionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CompleteMultipartUploadInput) GoString() string { +func (s DeleteBucketEncryptionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CompleteMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} +func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -6915,389 +9790,394 @@ func (s *CompleteMultipartUploadInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { s.Bucket = &v return s } -// SetKey sets the Key field's value. -func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { - s.Key = &v - return s +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetMultipartUpload sets the MultipartUpload field's value. -func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { - s.MultipartUpload = v - return s +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` } -// SetRequestPayer sets the RequestPayer field's value. -func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { - s.RequestPayer = &v - return s +// String returns the string representation +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) } -// SetUploadId sets the UploadId field's value. 
-func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { - s.UploadId = &v - return s +// GoString returns the string representation +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput -type CompleteMultipartUploadOutput struct { +type DeleteBucketInput struct { _ struct{} `type:"structure"` - Bucket *string `type:"string"` - - // Entity tag of the object. - ETag *string `type:"string"` - - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - Key *string `min:"1" type:"string"` - - Location *string `type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s CompleteMultipartUploadOutput) String() string { +func (s DeleteBucketInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CompleteMultipartUploadOutput) GoString() string { +func (s DeleteBucketInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetBucket sets the Bucket field's value. -func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { s.Bucket = &v return s } -// SetETag sets the ETag field's value. -func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { - s.ETag = &v - return s +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// SetExpiration sets the Expiration field's value. -func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { - s.Expiration = &v - return s +// String returns the string representation +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) } -// SetKey sets the Key field's value. -func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { - s.Key = &v - return s +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() } -// SetLocation sets the Location field's value. -func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { - s.Location = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } -// SetRequestCharged sets the RequestCharged field's value. -func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { - s.RequestCharged = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { - s.SSEKMSKeyId = &v +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v return s } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { - s.ServerSideEncryption = &v - return s +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetVersionId sets the VersionId field's value. -func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { - s.VersionId = &v +// SetId sets the Id field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload -type CompletedMultipartUpload struct { +type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` - - Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` } // String returns the string representation -func (s CompletedMultipartUpload) String() string { +func (s DeleteBucketInventoryConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CompletedMultipartUpload) GoString() string { +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { return s.String() } -// SetParts sets the Parts field's value. 
-func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { - s.Parts = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart -type CompletedPart struct { +type DeleteBucketLifecycleInput struct { _ struct{} `type:"structure"` - // Entity tag returned when the part was uploaded. - ETag *string `type:"string"` - - // Part number that identifies the part. This is a positive integer between - // 1 and 10,000. - PartNumber *int64 `type:"integer"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s CompletedPart) String() string { +func (s DeleteBucketLifecycleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CompletedPart) GoString() string { +func (s DeleteBucketLifecycleInput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *CompletedPart) SetETag(v string) *CompletedPart { - s.ETag = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetPartNumber sets the PartNumber field's value. -func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { - s.PartNumber = &v +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition -type Condition struct { - _ struct{} `type:"structure"` - - // The HTTP error code when the redirect is applied. In the event of an error, - // if the error code equals this value, then the specified redirect is applied. - // Required when parent element Condition is specified and sibling KeyPrefixEquals - // is not specified. If both are specified, then both must be true for the redirect - // to be applied. - HttpErrorCodeReturnedEquals *string `type:"string"` +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // The object key name prefix when the redirect is applied. For example, to - // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. - // To redirect request for all pages with the prefix docs/, the key prefix will - // be /docs, which identifies all objects in the docs/ folder. Required when - // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals - // is not specified. If both conditions are specified, both must be true for - // the redirect to be applied. 
- KeyPrefixEquals *string `type:"string"` +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s Condition) String() string { +func (s DeleteBucketLifecycleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Condition) GoString() string { +func (s DeleteBucketLifecycleOutput) GoString() string { return s.String() } -// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value. -func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition { - s.HttpErrorCodeReturnedEquals = &v - return s -} - -// SetKeyPrefixEquals sets the KeyPrefixEquals field's value. -func (s *Condition) SetKeyPrefixEquals(v string) *Condition { - s.KeyPrefixEquals = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest -type CopyObjectInput struct { +type DeleteBucketMetricsConfigurationInput struct { _ struct{} `type:"structure"` - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - + // The name of the bucket containing the metrics configuration to delete. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The name of the source bucket and key name of the source object, separated - // by a slash (/). Must be URL-encoded. + // The ID used to identify the metrics configuration. // - // CopySource is a required field - CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} - // Copies the object if its entity tag (ETag) matches the specified tag. - CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` +// String returns the string representation +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} - // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. 
- CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } - // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). - CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one - // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` +// SetId sets the Id field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` +// String returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} - // Allows grantee to write the ACL for the applicable object. 
- GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` - // Specifies whether the metadata is copied from the source object or replaced - // with metadata provided in the request. - MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. 
Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} - // The tag-set for the object destination object this value must be used in - // conjunction with the TaggingDirective. The tag-set must be encoded as URL - // Query parameters - Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} - // Specifies whether the object tag-set are copied from the source object or - // replaced with tag-set provided in the request. - TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"` +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + // The bucket name. + // + // It can take a while to propagate the deletion of a replication configuration + // to all Amazon S3 systems. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s CopyObjectInput) String() string { +func (s DeleteBucketReplicationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CopyObjectInput) GoString() string { +func (s DeleteBucketReplicationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CopyObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.CopySource == nil { - invalidParams.Add(request.NewErrParamRequired("CopySource")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -7306,439 +10186,530 @@ func (s *CopyObjectInput) Validate() error { return nil } -// SetACL sets the ACL field's value. -func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { - s.ACL = &v - return s -} - // SetBucket sets the Bucket field's value. -func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { +func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { s.Bucket = &v return s } -// SetCacheControl sets the CacheControl field's value. -func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { - s.CacheControl = &v - return s +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetContentDisposition sets the ContentDisposition field's value. -func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { - s.ContentDisposition = &v - return s +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` } -// SetContentEncoding sets the ContentEncoding field's value. -func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { - s.ContentEncoding = &v - return s +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) } -// SetContentLanguage sets the ContentLanguage field's value. -func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { - s.ContentLanguage = &v - return s +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() } -// SetContentType sets the ContentType field's value. -func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { - s.ContentType = &v - return s +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } -// SetCopySource sets the CopySource field's value. -func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { - s.CopySource = &v - return s +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) } -// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. -func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { - s.CopySourceIfMatch = &v - return s +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() } -// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. 
-func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { - s.CopySourceIfModifiedSince = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. -func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { - s.CopySourceIfNoneMatch = &v +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v return s } -// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. -func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { - s.CopySourceIfUnmodifiedSince = &v - return s +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { - s.CopySourceSSECustomerAlgorithm = &v - return s +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` } -// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { - s.CopySourceSSECustomerKey = &v - return s +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) } -// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { - s.CopySourceSSECustomerKeyMD5 = &v - return s +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetExpires sets the Expires field's value. -func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { - s.Expires = &v +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v return s } -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { - s.GrantFullControl = &v - return s +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetGrantRead sets the GrantRead field's value. -func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { - s.GrantRead = &v - return s +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` } -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { - s.GrantReadACP = &v - return s +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) } -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { - s.GrantWriteACP = &v - return s +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() } -// SetKey sets the Key field's value. -func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { - s.Key = &v - return s -} +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` -// SetMetadata sets the Metadata field's value. -func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { - s.Metadata = v - return s + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. + VersionId *string `type:"string"` } -// SetMetadataDirective sets the MetadataDirective field's value. -func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { - s.MetadataDirective = &v - return s +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) } -// SetRequestPayer sets the RequestPayer field's value. -func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { - s.RequestPayer = &v - return s +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { - s.SSECustomerAlgorithm = &v +// SetIsLatest sets the IsLatest field's value. +func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry { + s.IsLatest = &v return s } -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { - s.SSECustomerKey = &v +// SetKey sets the Key field's value. +func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry { + s.Key = &v return s } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { - s.SSECustomerKeyMD5 = &v +// SetLastModified sets the LastModified field's value. 
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry { + s.LastModified = &v return s } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { - s.SSEKMSKeyId = &v +// SetOwner sets the Owner field's value. +func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry { + s.Owner = v return s } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { - s.ServerSideEncryption = &v +// SetVersionId sets the VersionId field's value. +func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { + s.VersionId = &v return s } -// SetStorageClass sets the StorageClass field's value. -func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { - s.StorageClass = &v - return s +// Specifies whether Amazon S3 should replicate delete makers. +type DeleteMarkerReplication struct { + _ struct{} `type:"structure"` + + // The status of the delete marker replication. + // + // In the current implementation, Amazon S3 doesn't replicate the delete markers. + // The status must be Disabled. + Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"` } -// SetTagging sets the Tagging field's value. -func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { - s.Tagging = &v - return s +// String returns the string representation +func (s DeleteMarkerReplication) String() string { + return awsutil.Prettify(s) } -// SetTaggingDirective sets the TaggingDirective field's value. -func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { - s.TaggingDirective = &v - return s +// GoString returns the string representation +func (s DeleteMarkerReplication) GoString() string { + return s.String() } -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { - s.WebsiteRedirectLocation = &v +// SetStatus sets the Status field's value. +func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { + s.Status = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput -type CopyObjectOutput struct { - _ struct{} `type:"structure" payload:"CopyObjectResult"` - - CopyObjectResult *CopyObjectResult `type:"structure"` - - CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` - - // If the object expiration is configured, the response includes this header. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` +type DeleteObjectInput struct { + _ struct{} `type:"structure"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions + // to process this operation. 
+ BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Version ID of the newly created copy. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s CopyObjectOutput) String() string { +func (s DeleteObjectInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CopyObjectOutput) GoString() string { +func (s DeleteObjectInput) GoString() string { return s.String() } -// SetCopyObjectResult sets the CopyObjectResult field's value. -func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { - s.CopyObjectResult = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { + s.Bucket = &v return s } -// SetCopySourceVersionId sets the CopySourceVersionId field's value. 
-func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { - s.CopySourceVersionId = &v +func (s *DeleteObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { + s.BypassGovernanceRetention = &v return s } -// SetExpiration sets the Expiration field's value. -func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { - s.Expiration = &v +// SetKey sets the Key field's value. +func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v return s } -// SetRequestCharged sets the RequestCharged field's value. -func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { - s.RequestCharged = &v +// SetMFA sets the MFA field's value. +func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v return s } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { - s.SSECustomerAlgorithm = &v +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v return s } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { - s.SSECustomerKeyMD5 = &v +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v return s } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { - s.SSEKMSKeyId = &v +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v return s } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { - s.ServerSideEncryption = &v +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v return s } // SetVersionId sets the VersionId field's value. 
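DeleteObjectInput and DeleteObjectOutput above carry S3's versioning semantics end to end: a delete without VersionId creates a delete marker (whose ID comes back in x-amz-version-id), while a delete of a specific VersionId permanently removes that version. A hedged sketch, assuming the same aws/s3 imports plus fmt, and an *s3.S3 client such as s3.New(session.Must(session.NewSession())); the bucket, key, and version here are placeholders:

// deleteOneVersion permanently removes a single object version and
// reports what the service says it deleted.
func deleteOneVersion(svc *s3.S3) error {
	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String("my-bucket"),        // placeholder
		Key:       aws.String("logs/app.log"),     // placeholder
		VersionId: aws.String("exampleVersionId"), // placeholder
	})
	if err != nil {
		return err
	}
	// Per the field docs above, DeleteMarker reports whether the
	// deleted version was itself a delete marker.
	fmt.Println(aws.BoolValue(out.DeleteMarker), aws.StringValue(out.VersionId))
	return nil
}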
-func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { s.VersionId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult -type CopyObjectResult struct { +type DeleteObjectTaggingInput struct { _ struct{} `type:"structure"` - ETag *string `type:"string"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s CopyObjectResult) String() string { +func (s DeleteObjectTaggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CopyObjectResult) GoString() string { +func (s DeleteObjectTaggingInput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { - s.LastModified = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult -type CopyPartResult struct { - _ struct{} `type:"structure"` - - // Entity tag of the object. - ETag *string `type:"string"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } - // Date and time at which the object was uploaded. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// String returns the string representation -func (s CopyPartResult) String() string { - return awsutil.Prettify(s) +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s } -// GoString returns the string representation -func (s CopyPartResult) GoString() string { - return s.String() +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetETag sets the ETag field's value. -func (s *CopyPartResult) SetETag(v string) *CopyPartResult { - s.ETag = &v +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v return s } -// SetLastModified sets the LastModified field's value. -func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { - s.LastModified = &v +// SetVersionId sets the VersionId field's value. 
+func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration -type CreateBucketConfiguration struct { +type DeleteObjectTaggingOutput struct { _ struct{} `type:"structure"` - // Specifies the region where the bucket will be created. If you don't specify - // a region, the bucket will be created in US Standard. - LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` + // The versionId of the object the tag-set was removed from. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } // String returns the string representation -func (s CreateBucketConfiguration) String() string { +func (s DeleteObjectTaggingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateBucketConfiguration) GoString() string { +func (s DeleteObjectTaggingOutput) GoString() string { return s.String() } -// SetLocationConstraint sets the LocationConstraint field's value. -func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { - s.LocationConstraint = &v +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest -type CreateBucketInput struct { - _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` - - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + // Specifies whether you want to delete this object even if it has a Governance-type + // Object Lock in place. You must have sufficient permissions to perform this + // operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + // Delete is a required field + Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Allows grantee to write the ACL for the applicable bucket. 
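DeleteObjectTaggingInput addresses the target version through the versionId query string, and the output echoes the affected version back from the x-amz-version-id header. A short sketch under the same import and client assumptions as above (all names are placeholders):

// removeTagSet strips the tag-set from one version of an object.
func removeTagSet(svc *s3.S3) error {
	out, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
		Bucket:    aws.String("my-bucket"),        // placeholder
		Key:       aws.String("photos/cat.jpg"),   // placeholder
		VersionId: aws.String("exampleVersionId"), // optional; placeholder
	})
	if err != nil {
		return err
	}
	fmt.Println("tag-set removed from version:", aws.StringValue(out.VersionId))
	return nil
}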
- GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } // String returns the string representation -func (s CreateBucketInput) String() string { +func (s DeleteObjectsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateBucketInput) GoString() string { +func (s DeleteObjectsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -7746,186 +10717,251 @@ func (s *CreateBucketInput) Validate() error { return nil } -// SetACL sets the ACL field's value. -func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { - s.ACL = &v +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v return s } -// SetBucket sets the Bucket field's value. -func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { - s.Bucket = &v +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { + s.BypassGovernanceRetention = &v return s } -// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. -func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { - s.CreateBucketConfiguration = v +// SetDelete sets the Delete field's value. +func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v return s } -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { - s.GrantFullControl = &v +// SetMFA sets the MFA field's value. +func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { + s.MFA = &v return s } -// SetGrantRead sets the GrantRead field's value. -func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { - s.GrantRead = &v +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { + s.RequestPayer = &v return s } -// SetGrantReadACP sets the GrantReadACP field's value. 
-func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { - s.GrantReadACP = &v +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + Deleted []*DeletedObject `type:"list" flattened:"true"` + + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. +func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v return s } -// SetGrantWrite sets the GrantWrite field's value. -func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { - s.GrantWrite = &v +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v return s } -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { - s.GrantWriteACP = &v +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput -type CreateBucketOutput struct { +type DeletePublicAccessBlockInput struct { _ struct{} `type:"structure"` - Location *string `location:"header" locationName:"Location" type:"string"` + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
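DeleteObjectsInput batches keys inside the required Delete payload (the xmlURI tag pins the XML namespace of the request body), and DeleteObjectsOutput splits the results into Deleted and Errors. A hedged sketch of a quiet batch delete under the same assumptions; bucket, keys, and version are placeholders:

// batchDelete removes several keys in one request and prints any
// per-key failures. With Quiet set, S3 reports only the failures.
func batchDelete(svc *s3.S3) error {
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("a.txt")},
				{Key: aws.String("b.txt"), VersionId: aws.String("exampleVersionId")},
			},
			Quiet: aws.Bool(true),
		},
	})
	if err != nil {
		return err
	}
	for _, e := range out.Errors {
		fmt.Printf("could not delete %s: %s\n",
			aws.StringValue(e.Key), aws.StringValue(e.Message))
	}
	return nil
}

Note that the Validate method above recurses into the nested Delete value, so a nil Objects list or an identifier with no Key is caught client-side just like a missing Bucket.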
+func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeletePublicAccessBlockOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s CreateBucketOutput) String() string { +func (s DeletePublicAccessBlockOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateBucketOutput) GoString() string { +func (s DeletePublicAccessBlockOutput) GoString() string { return s.String() } -// SetLocation sets the Location field's value. -func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { - s.Location = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest -type CreateMultipartUploadInput struct { +type DeletedObject struct { _ struct{} `type:"structure"` - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + DeleteMarker *bool `type:"boolean"` - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + DeleteMarkerVersionId *string `type:"string"` - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + Key *string `min:"1" type:"string"` - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + VersionId *string `type:"string"` +} - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} - // Allows grantee to write the ACL for the applicable object. 
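DeletePublicAccessBlockInput and its output round out the Block Public Access surface this SDK revision vendors in; the call needs only the bucket whose PublicAccessBlock configuration should be removed. A one-call sketch under the same assumptions:

// clearPublicAccessBlock removes the bucket's PublicAccessBlock
// configuration (the bucket name is a placeholder).
func clearPublicAccessBlock(svc *s3.S3) error {
	_, err := svc.DeletePublicAccessBlock(&s3.DeletePublicAccessBlockInput{
		Bucket: aws.String("my-bucket"),
	})
	return err
}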
- GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { + s.DeleteMarker = &v + return s +} - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` +// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. +func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { + s.DeleteMarkerVersionId = &v + return s +} - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +// SetKey sets the Key field's value. +func (s *DeletedObject) SetKey(v string) *DeletedObject { + s.Key = &v + return s +} - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` +// SetVersionId sets the VersionId field's value. +func (s *DeletedObject) SetVersionId(v string) *DeletedObject { + s.VersionId = &v + return s +} - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` +// A container for information about the replication destination. +type Destination struct { + _ struct{} `type:"structure"` - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // A container for information about access control for replicas. + // + // Use this element only in a cross-account scenario where source and destination + // bucket owners are not the same to change replica ownership to the AWS account + // that owns the destination bucket. If you don't add this element to the replication + // configuration, the replicas are owned by same AWS account that owns the source + // object. + AccessControlTranslation *AccessControlTranslation `type:"structure"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. 
Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // The account ID of the destination bucket. Currently, Amazon S3 verifies this + // value only if Access Control Translation is enabled. + // + // In a cross-account scenario, if you change replica ownership to the AWS account + // that owns the destination bucket by adding the AccessControlTranslation element, + // this is the account ID of the owner of the destination bucket. + Account *string `type:"string"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to + // store replicas of the object identified by the rule. + // + // If there are multiple rules in your replication configuration, all rules + // must specify the same bucket as the destination. A replication configuration + // can replicate objects to only one destination bucket. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + // A container that provides information about encryption. If SourceSelectionCriteria + // is specified, you must specify this element. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + // The class of storage used to store the object. By default Amazon S3 uses + // storage class of the source object when creating a replica. + StorageClass *string `type:"string" enum:"StorageClass"` } // String returns the string representation -func (s CreateMultipartUploadInput) String() string { +func (s Destination) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateMultipartUploadInput) GoString() string { +func (s Destination) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -7934,288 +10970,376 @@ func (s *CreateMultipartUploadInput) Validate() error { return nil } -// SetACL sets the ACL field's value. -func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { - s.ACL = &v +// SetAccessControlTranslation sets the AccessControlTranslation field's value. +func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v return s } // SetBucket sets the Bucket field's value. -func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { +func (s *Destination) SetBucket(v string) *Destination { s.Bucket = &v return s } -// SetCacheControl sets the CacheControl field's value. -func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { - s.CacheControl = &v - return s +func (s *Destination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetContentDisposition sets the ContentDisposition field's value. -func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { - s.ContentDisposition = &v +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v return s } -// SetContentEncoding sets the ContentEncoding field's value. -func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { - s.ContentEncoding = &v +// SetStorageClass sets the StorageClass field's value. +func (s *Destination) SetStorageClass(v string) *Destination { + s.StorageClass = &v return s } -// SetContentLanguage sets the ContentLanguage field's value. -func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { - s.ContentLanguage = &v - return s +// Describes the server-side encryption that will be applied to the restore +// results. +type Encryption struct { + _ struct{} `type:"structure"` + + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (e.g., AES256, aws:kms). + // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` + + // If the encryption type is aws:kms, this optional value specifies the AWS + // KMS key ID to use for encryption of job results. 
+ KMSKeyId *string `type:"string" sensitive:"true"` } -// SetContentType sets the ContentType field's value. -func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { - s.ContentType = &v - return s +// String returns the string representation +func (s Encryption) String() string { + return awsutil.Prettify(s) } -// SetExpires sets the Expires field's value. -func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { - s.Expires = &v - return s +// GoString returns the string representation +func (s Encryption) GoString() string { + return s.String() } -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { - s.GrantFullControl = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetGrantRead sets the GrantRead field's value. -func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { - s.GrantRead = &v +// SetEncryptionType sets the EncryptionType field's value. +func (s *Encryption) SetEncryptionType(v string) *Encryption { + s.EncryptionType = &v return s } -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { - s.GrantReadACP = &v +// SetKMSContext sets the KMSContext field's value. +func (s *Encryption) SetKMSContext(v string) *Encryption { + s.KMSContext = &v return s } -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { - s.GrantWriteACP = &v +// SetKMSKeyId sets the KMSKeyId field's value. +func (s *Encryption) SetKMSKeyId(v string) *Encryption { + s.KMSKeyId = &v return s } -// SetKey sets the Key field's value. -func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { - s.Key = &v - return s +// A container for information about the encryption-based configuration for +// replicas. +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The ID of the AWS KMS key for the AWS Region where the destination bucket + // resides. Amazon S3 uses this key to encrypt the replica object. + ReplicaKmsKeyID *string `type:"string"` } -// SetMetadata sets the Metadata field's value. -func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { - s.Metadata = v - return s +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) } -// SetRequestPayer sets the RequestPayer field's value. -func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { - s.RequestPayer = &v - return s +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { - s.SSECustomerAlgorithm = &v +// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value. 
+func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration { + s.ReplicaKmsKeyID = &v return s } -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { - s.SSECustomerKey = &v - return s +type EndEvent struct { + _ struct{} `locationName:"EndEvent" type:"structure"` } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { - s.SSECustomerKeyMD5 = &v - return s +// String returns the string representation +func (s EndEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EndEvent) GoString() string { + return s.String() +} + +// The EndEvent is an event in the SelectObjectContentEventStream group of events. +func (s *EndEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *EndEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + return nil +} + +type Error struct { + _ struct{} `type:"structure"` + + Code *string `type:"string"` + + Key *string `min:"1" type:"string"` + + Message *string `type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { - s.SSEKMSKeyId = &v +// SetCode sets the Code field's value. +func (s *Error) SetCode(v string) *Error { + s.Code = &v return s } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { - s.ServerSideEncryption = &v +// SetKey sets the Key field's value. +func (s *Error) SetKey(v string) *Error { + s.Key = &v return s } -// SetStorageClass sets the StorageClass field's value. -func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { - s.StorageClass = &v +// SetMessage sets the Message field's value. +func (s *Error) SetMessage(v string) *Error { + s.Message = &v return s } -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { - s.WebsiteRedirectLocation = &v +// SetVersionId sets the VersionId field's value. +func (s *Error) SetVersionId(v string) *Error { + s.VersionId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput -type CreateMultipartUploadOutput struct { +type ErrorDocument struct { _ struct{} `type:"structure"` - // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` - - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation.
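The Destination and EncryptionConfiguration types above, together with AccessControlTranslation, compose the replication target that later appears inside a ReplicationRule. A minimal sketch of building one with the generated chainable setters; every ARN and account ID below is a placeholder, and OwnerOverrideDestination / StorageClassStandardIa are assumed to be the SDK's stock enum constants:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder identifiers throughout; chaining works because every
	// generated setter returns its receiver.
	dest := new(s3.Destination).
		SetBucket("arn:aws:s3:::example-destination-bucket").
		SetAccount("111122223333").
		SetAccessControlTranslation(
			new(s3.AccessControlTranslation).SetOwner(s3.OwnerOverrideDestination)).
		SetEncryptionConfiguration(
			new(s3.EncryptionConfiguration).
				SetReplicaKmsKeyID("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE")).
		SetStorageClass(s3.StorageClassStandardIa)

	// Validate enforces the required Bucket and nests into
	// AccessControlTranslation.Validate, as generated above.
	if err := dest.Validate(); err != nil {
		log.Fatal(err)
	}
}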
- AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + // The object key name to use when a 4XX class error occurs. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `locationName:"Bucket" type:"string"` +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} - // Object key for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v + return s +} - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` +// A container for a key value pair that defines the criteria for the filter +// rule. +type FilterRule struct { + _ struct{} `type:"structure"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum prefix length is 1,024 characters. + // Overlapping prefixes and suffixes are not supported. For more information, + // see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Name *string `type:"string" enum:"FilterRuleName"` - // ID for the initiated multipart upload. 
- UploadId *string `type:"string"` + Value *string `type:"string"` } // String returns the string representation -func (s CreateMultipartUploadOutput) String() string { +func (s FilterRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateMultipartUploadOutput) GoString() string { +func (s FilterRule) GoString() string { return s.String() } -// SetAbortDate sets the AbortDate field's value. -func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { - s.AbortDate = &v +// SetName sets the Name field's value. +func (s *FilterRule) SetName(v string) *FilterRule { + s.Name = &v return s } -// SetAbortRuleId sets the AbortRuleId field's value. -func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { - s.AbortRuleId = &v +// SetValue sets the Value field's value. +func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v return s } -// SetBucket sets the Bucket field's value. -func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { - s.Bucket = &v - return s +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure"` + + // Name of the bucket for which the accelerate configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } -// SetKey sets the Key field's value. -func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { - s.Key = &v - return s +// String returns the string representation +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) } -// SetRequestCharged sets the RequestCharged field's value. -func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { - s.RequestCharged = &v - return s +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { - s.SSECustomerAlgorithm = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { - s.SSECustomerKeyMD5 = &v +// SetBucket sets the Bucket field's value. +func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v return s } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { - s.SSEKMSKeyId = &v - return s +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetServerSideEncryption sets the ServerSideEncryption field's value. 
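Every Input type in this stretch of the diff follows the same pattern: a required Bucket field, a Validate hook the client runs before signing, and the new unexported getBucket accessor used by the SDK's bucket-addressing customizations. A short end-to-end sketch against one of them, assuming default credentials; the region and bucket name are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// An unset Bucket would be rejected by Validate before the request
	// is ever signed or sent.
	out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Status is the enum carried by the Output type defined just below.
	fmt.Println("accelerate status:", aws.StringValue(out.Status))
}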
-func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { - s.ServerSideEncryption = &v - return s +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` } -// SetUploadId sets the UploadId field's value. -func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { - s.UploadId = &v +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete -type Delete struct { +type GetBucketAclInput struct { _ struct{} `type:"structure"` - // Objects is a required field - Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` - - // Element to enable quiet mode for the request. When you add this element, - // you must set its value to true. - Quiet *bool `type:"boolean"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s Delete) String() string { +func (s GetBucketAclInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Delete) GoString() string { +func (s GetBucketAclInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Delete) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Delete"} - if s.Objects == nil { - invalidParams.Add(request.NewErrParamRequired("Objects")) +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Objects != nil { - for i, v := range s.Objects { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) - } - } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -8224,23 +11348,54 @@ func (s *Delete) Validate() error { return nil } -// SetObjects sets the Objects field's value. -func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { - s.Objects = v +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v return s } -// SetQuiet sets the Quiet field's value. -func (s *Delete) SetQuiet(v bool) *Delete { - s.Quiet = &v +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. 
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest -type DeleteBucketAnalyticsConfigurationInput struct { +type GetBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure"` - // The name of the bucket from which an analytics configuration is deleted. + // The name of the bucket from which an analytics configuration is retrieved. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -8252,21 +11407,24 @@ type DeleteBucketAnalyticsConfigurationInput struct { } // String returns the string representation -func (s DeleteBucketAnalyticsConfigurationInput) String() string { +func (s GetBucketAnalyticsConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { +func (s GetBucketAnalyticsConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -8278,34 +11436,48 @@ func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { s.Bucket = &v return s } +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. -func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { +func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput -type DeleteBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure"` +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. 
+ AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` } // String returns the string representation -func (s DeleteBucketAnalyticsConfigurationOutput) String() string { +func (s GetBucketAnalyticsConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest -type DeleteBucketCorsInput struct { +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { + s.AnalyticsConfiguration = v + return s +} + +type GetBucketCorsInput struct { _ struct{} `type:"structure"` // Bucket is a required field @@ -8313,21 +11485,24 @@ type DeleteBucketCorsInput struct { } // String returns the string representation -func (s DeleteBucketCorsInput) String() string { +func (s GetBucketCorsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketCorsInput) GoString() string { +func (s GetBucketCorsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8336,50 +11511,69 @@ func (s *DeleteBucketCorsInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { +func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput -type DeleteBucketCorsOutput struct { +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketCorsOutput struct { _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` } // String returns the string representation -func (s DeleteBucketCorsOutput) String() string { +func (s GetBucketCorsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketCorsOutput) GoString() string { +func (s GetBucketCorsOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest -type DeleteBucketInput struct { +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +type GetBucketEncryptionInput struct { _ struct{} `type:"structure"` + // The name of the bucket from which the server-side encryption configuration + // is retrieved. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBucketInput) String() string { +func (s GetBucketEncryptionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketInput) GoString() string { +func (s GetBucketEncryptionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8388,16 +11582,46 @@ func (s *DeleteBucketInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { +func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest -type DeleteBucketInventoryConfigurationInput struct { +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Container for server-side encryption configuration rules. Currently S3 supports + // one rule only. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + +type GetBucketInventoryConfigurationInput struct { _ struct{} `type:"structure"` - // The name of the bucket containing the inventory configuration to delete. + // The name of the bucket containing the inventory configuration to retrieve. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -8409,21 +11633,24 @@ type DeleteBucketInventoryConfigurationInput struct { } // String returns the string representation -func (s DeleteBucketInventoryConfigurationInput) String() string { +func (s GetBucketInventoryConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketInventoryConfigurationInput) GoString() string { +func (s GetBucketInventoryConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
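GetBucketEncryptionInput/Output above are the read side of the bucket default-encryption API added in this SDK revision. A sketch of walking the returned rules, reusing the imports and svc client from the previous snippet; the bucket name is again a placeholder, and Rules / ApplyServerSideEncryptionByDefault belong to the ServerSideEncryptionConfiguration types generated elsewhere in this file:

// printBucketEncryption lists the default SSE algorithm of each rule.
func printBucketEncryption(svc *s3.S3) error {
	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		return err
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		// ApplyServerSideEncryptionByDefault carries the SSEAlgorithm
		// and, for aws:kms, the KMS master key ID.
		if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
			fmt.Println(aws.StringValue(def.SSEAlgorithm))
		}
	}
	return nil
}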
-func (s *DeleteBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} +func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -8435,34 +11662,48 @@ func (s *DeleteBucketInventoryConfigurationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { s.Bucket = &v return s } +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. -func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { +func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput -type DeleteBucketInventoryConfigurationOutput struct { - _ struct{} `type:"structure"` +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` } // String returns the string representation -func (s DeleteBucketInventoryConfigurationOutput) String() string { +func (s GetBucketInventoryConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketInventoryConfigurationOutput) GoString() string { +func (s GetBucketInventoryConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest -type DeleteBucketLifecycleInput struct { +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +type GetBucketLifecycleConfigurationInput struct { _ struct{} `type:"structure"` // Bucket is a required field @@ -8470,21 +11711,24 @@ type DeleteBucketLifecycleInput struct { } // String returns the string representation -func (s DeleteBucketLifecycleInput) String() string { +func (s GetBucketLifecycleConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketLifecycleInput) GoString() string { +func (s GetBucketLifecycleConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8493,59 +11737,65 @@ func (s *DeleteBucketLifecycleInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { +func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput -type DeleteBucketLifecycleOutput struct { +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` } // String returns the string representation -func (s DeleteBucketLifecycleOutput) String() string { +func (s GetBucketLifecycleConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketLifecycleOutput) GoString() string { +func (s GetBucketLifecycleConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest -type DeleteBucketMetricsConfigurationInput struct { +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +type GetBucketLifecycleInput struct { _ struct{} `type:"structure"` - // The name of the bucket containing the metrics configuration to delete. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBucketMetricsConfigurationInput) String() string { +func (s GetBucketLifecycleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketMetricsConfigurationInput) GoString() string { +func (s GetBucketLifecycleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -8555,49 +11805,109 @@ func (s *DeleteBucketMetricsConfigurationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { +func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { s.Bucket = &v return s } -// SetId sets the Id field's value. -func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { - s.Id = &v +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput -type DeleteBucketMetricsConfigurationOutput struct { +type GetBucketLocationInput struct { _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBucketMetricsConfigurationOutput) String() string { +func (s GetBucketLocationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketMetricsConfigurationOutput) GoString() string { +func (s GetBucketLocationInput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput -type DeleteBucketOutput struct { +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
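Note the deliberate duplication just above: GetBucketLifecycle (returning []*Rule) is the legacy form, while GetBucketLifecycleConfiguration (returning []*LifecycleRule) supersedes it and supports filters and multiple transitions. A sketch against the newer call, under the same shared-client and placeholder-bucket assumptions as the earlier snippets; ID and Expiration are LifecycleRule fields generated elsewhere in this file:

// printExpiryRules reports each lifecycle rule that expires objects.
func printExpiryRules(svc *s3.S3) error {
	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		return err
	}
	for _, rule := range out.Rules {
		if rule.Expiration != nil && rule.Expiration.Days != nil {
			fmt.Printf("%s: objects expire after %d days\n",
				aws.StringValue(rule.ID), aws.Int64Value(rule.Expiration.Days))
		}
	}
	return nil
}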
+func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLocationOutput struct { _ struct{} `type:"structure"` + + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } // String returns the string representation -func (s DeleteBucketOutput) String() string { +func (s GetBucketLocationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketOutput) GoString() string { +func (s GetBucketLocationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest -type DeleteBucketPolicyInput struct { +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +type GetBucketLoggingInput struct { _ struct{} `type:"structure"` // Bucket is a required field @@ -8605,21 +11915,24 @@ type DeleteBucketPolicyInput struct { } // String returns the string representation -func (s DeleteBucketPolicyInput) String() string { +func (s GetBucketLoggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketPolicyInput) GoString() string { +func (s GetBucketLoggingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8628,50 +11941,79 @@ func (s *DeleteBucketPolicyInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput -type DeleteBucketPolicyOutput struct { +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` + + // Container for logging information. Presence of this element indicates that + // logging is enabled. Parameters TargetBucket and TargetPrefix are required + // in this case. 
+ LoggingEnabled *LoggingEnabled `type:"structure"` } // String returns the string representation -func (s DeleteBucketPolicyOutput) String() string { +func (s GetBucketLoggingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketPolicyOutput) GoString() string { +func (s GetBucketLoggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest -type DeleteBucketReplicationInput struct { +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +type GetBucketMetricsConfigurationInput struct { _ struct{} `type:"structure"` + // The name of the bucket containing the metrics configuration to retrieve. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBucketReplicationInput) String() string { +func (s GetBucketMetricsConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketReplicationInput) GoString() string { +func (s GetBucketMetricsConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} +func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } if invalidParams.Len() > 0 { return invalidParams @@ -8680,50 +12022,75 @@ func (s *DeleteBucketReplicationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { +func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput -type DeleteBucketReplicationOutput struct { - _ struct{} `type:"structure"` +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. 
+ MetricsConfiguration *MetricsConfiguration `type:"structure"` } // String returns the string representation -func (s DeleteBucketReplicationOutput) String() string { +func (s GetBucketMetricsConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketReplicationOutput) GoString() string { +func (s GetBucketMetricsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest -type DeleteBucketTaggingInput struct { +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +type GetBucketNotificationConfigurationRequest struct { _ struct{} `type:"structure"` + // Name of the bucket to get the notification configuration for. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBucketTaggingInput) String() string { +func (s GetBucketNotificationConfigurationRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketTaggingInput) GoString() string { +func (s GetBucketNotificationConfigurationRequest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} +func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8732,28 +12099,19 @@ func (s *DeleteBucketTaggingInput) Validate() error { } // SetBucket sets the Bucket field's value. 
-func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput -type DeleteBucketTaggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketTaggingOutput) GoString() string { - return s.String() +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest -type DeleteBucketWebsiteInput struct { +type GetBucketPolicyInput struct { _ struct{} `type:"structure"` // Bucket is a required field @@ -8761,21 +12119,24 @@ type DeleteBucketWebsiteInput struct { } // String returns the string representation -func (s DeleteBucketWebsiteInput) String() string { +func (s GetBucketPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketWebsiteInput) GoString() string { +func (s GetBucketPolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8784,249 +12145,207 @@ func (s *DeleteBucketWebsiteInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { +func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput -type DeleteBucketWebsiteOutput struct { - _ struct{} `type:"structure"` +func (s *GetBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` } // String returns the string representation -func (s DeleteBucketWebsiteOutput) String() string { +func (s GetBucketPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketWebsiteOutput) GoString() string { +func (s GetBucketPolicyOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry -type DeleteMarkerEntry struct { - _ struct{} `type:"structure"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `min:"1" type:"string"` - - // Date and time the object was last modified. 
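GetBucketPolicyOutput above is unusual among these types: its single Policy field is the whole HTTP payload (per the payload:"Policy" tag) rather than unmarshalled XML. A sketch of fetching the raw JSON document, under the same shared-client and placeholder-bucket assumptions:

// bucketPolicyJSON returns the bucket policy as its raw JSON text.
func bucketPolicyJSON(svc *s3.S3) (string, error) {
	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		return "", err
	}
	// Policy is carried verbatim in the response body.
	return aws.StringValue(out.Policy), nil
}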
- LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} - Owner *Owner `type:"structure"` +type GetBucketPolicyStatusInput struct { + _ struct{} `type:"structure"` - // Version ID of an object. - VersionId *string `type:"string"` + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s DeleteMarkerEntry) String() string { +func (s GetBucketPolicyStatusInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteMarkerEntry) GoString() string { +func (s GetBucketPolicyStatusInput) GoString() string { return s.String() } -// SetIsLatest sets the IsLatest field's value. -func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry { - s.IsLatest = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetKey sets the Key field's value. -func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry { - s.Key = &v +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput { + s.Bucket = &v return s } -// SetLastModified sets the LastModified field's value. -func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry { - s.LastModified = &v - return s +func (s *GetBucketPolicyStatusInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetOwner sets the Owner field's value. -func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry { - s.Owner = v - return s +type GetBucketPolicyStatusOutput struct { + _ struct{} `type:"structure" payload:"PolicyStatus"` + + // The policy status for the specified bucket. + PolicyStatus *PolicyStatus `type:"structure"` } -// SetVersionId sets the VersionId field's value. -func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { - s.VersionId = &v +// String returns the string representation +func (s GetBucketPolicyStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyStatusOutput) GoString() string { + return s.String() +} + +// SetPolicyStatus sets the PolicyStatus field's value. 
+func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput { + s.PolicyStatus = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest -type DeleteObjectInput struct { +type GetBucketReplicationInput struct { _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s DeleteObjectInput) String() string { +func (s GetBucketReplicationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteObjectInput) GoString() string { +func (s GetBucketReplicationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { - s.Key = &v - return s -} -// SetMFA sets the MFA field's value. -func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { - s.MFA = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetRequestPayer sets the RequestPayer field's value. -func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { - s.RequestPayer = &v +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v return s } -// SetVersionId sets the VersionId field's value. 
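GetBucketReplicationOutput closes the loop with the Destination type from earlier: each rule in the returned ReplicationConfiguration carries one. A final sketch under the same assumptions (shared client, placeholder bucket); Rules and Destination hang off the ReplicationConfiguration types generated elsewhere in this file:

// printReplicationTargets lists the destination bucket ARN of each rule.
func printReplicationTargets(svc *s3.S3) error {
	out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
		Bucket: aws.String("example-source-bucket"),
	})
	if err != nil {
		return err
	}
	for _, rule := range out.ReplicationConfiguration.Rules {
		fmt.Println(aws.StringValue(rule.Destination.Bucket))
	}
	return nil
}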
-func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { - s.VersionId = &v - return s +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput -type DeleteObjectOutput struct { - _ struct{} `type:"structure"` - - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` - // Returns the version ID of the delete marker created as a result of the DELETE - // operation. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` } // String returns the string representation -func (s DeleteObjectOutput) String() string { +func (s GetBucketReplicationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteObjectOutput) GoString() string { +func (s GetBucketReplicationOutput) GoString() string { return s.String() } -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { - s.DeleteMarker = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { - s.RequestCharged = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { - s.VersionId = &v +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest -type DeleteObjectTaggingInput struct { +type GetBucketRequestPaymentInput struct { _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The versionId of the object that the tag-set will be removed from. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s DeleteObjectTaggingInput) String() string { +func (s GetBucketRequestPaymentInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteObjectTaggingInput) GoString() string { +func (s GetBucketRequestPaymentInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
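// A similar sketch for GetBucketReplication (same assumed svc client; the ID
// and Status fields are defined on ReplicationRule elsewhere in this file):
//
//    out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err != nil {
//        return err
//    }
//    for _, rule := range out.ReplicationConfiguration.Rules {
//        fmt.Println(aws.StringValue(rule.ID), aws.StringValue(rule.Status))
//    }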
-func (s *DeleteObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} +func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -9036,91 +12355,66 @@ func (s *DeleteObjectTaggingInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { +func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { s.Bucket = &v return s } -// SetKey sets the Key field's value. -func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { - s.VersionId = &v - return s +func (s *GetBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput -type DeleteObjectTaggingOutput struct { +type GetBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` - // The versionId of the object the tag-set was removed from. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` } // String returns the string representation -func (s DeleteObjectTaggingOutput) String() string { +func (s GetBucketRequestPaymentOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteObjectTaggingOutput) GoString() string { +func (s GetBucketRequestPaymentOutput) GoString() string { return s.String() } -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { - s.VersionId = &v +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest -type DeleteObjectsInput struct { - _ struct{} `type:"structure" payload:"Delete"` +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Delete is a required field - Delete *Delete `locationName:"Delete" type:"structure" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. 
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } // String returns the string representation -func (s DeleteObjectsInput) String() string { +func (s GetBucketTaggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteObjectsInput) GoString() string { +func (s GetBucketTaggingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Delete == nil { - invalidParams.Add(request.NewErrParamRequired("Delete")) - } - if s.Delete != nil { - if err := s.Delete.Validate(); err != nil { - invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) - } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -9130,147 +12424,147 @@ func (s *DeleteObjectsInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { s.Bucket = &v return s } -// SetDelete sets the Delete field's value. -func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { - s.Delete = v - return s +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetMFA sets the MFA field's value. -func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { - s.MFA = &v - return s +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } -// SetRequestPayer sets the RequestPayer field's value. -func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { - s.RequestPayer = &v - return s +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput -type DeleteObjectsOutput struct { - _ struct{} `type:"structure"` +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} - Deleted []*DeletedObject `type:"list" flattened:"true"` +// SetTagSet sets the TagSet field's value. +func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} - Errors []*Error `locationName:"Error" type:"list" flattened:"true"` +type GetBucketVersioningInput struct { + _ struct{} `type:"structure"` - // If present, indicates that the requester was successfully charged for the - // request. 
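// Reading a bucket's tag set with GetBucketTagging, under the same
// assumptions (svc client; illustrative bucket name):
//
//    out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err != nil {
//        return err
//    }
//    for _, tag := range out.TagSet {
//        fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
//    }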
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s DeleteObjectsOutput) String() string { +func (s GetBucketVersioningInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteObjectsOutput) GoString() string { +func (s GetBucketVersioningInput) GoString() string { return s.String() } -// SetDeleted sets the Deleted field's value. -func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { - s.Deleted = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetErrors sets the Errors field's value. -func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { - s.Errors = v +// SetBucket sets the Bucket field's value. +func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v return s } -// SetRequestCharged sets the RequestCharged field's value. -func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { - s.RequestCharged = &v - return s +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject -type DeletedObject struct { +type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` - DeleteMarker *bool `type:"boolean"` - - DeleteMarkerVersionId *string `type:"string"` - - Key *string `min:"1" type:"string"` + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` - VersionId *string `type:"string"` + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` } // String returns the string representation -func (s DeletedObject) String() string { +func (s GetBucketVersioningOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeletedObject) GoString() string { +func (s GetBucketVersioningOutput) GoString() string { return s.String() } -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { - s.DeleteMarker = &v - return s -} - -// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. -func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { - s.DeleteMarkerVersionId = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeletedObject) SetKey(v string) *DeletedObject { - s.Key = &v +// SetMFADelete sets the MFADelete field's value. 
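// Inspecting versioning state via GetBucketVersioning (assumed svc client;
// Status and MFADelete are unset for buckets never configured for versioning):
//
//    out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err != nil {
//        return err
//    }
//    fmt.Println(aws.StringValue(out.Status), aws.StringValue(out.MFADelete))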
+func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *DeletedObject) SetVersionId(v string) *DeletedObject { - s.VersionId = &v +// SetStatus sets the Status field's value. +func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination -type Destination struct { +type GetBucketWebsiteInput struct { _ struct{} `type:"structure"` - // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store - // replicas of the object identified by the rule. - // // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s Destination) String() string { +func (s GetBucketWebsiteInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Destination) GoString() string { +func (s GetBucketWebsiteInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Destination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Destination"} +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9279,87 +12573,102 @@ func (s *Destination) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *Destination) SetBucket(v string) *Destination { +func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { s.Bucket = &v return s } -// SetStorageClass sets the StorageClass field's value. -func (s *Destination) SetStorageClass(v string) *Destination { - s.StorageClass = &v - return s +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error -type Error struct { +type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` - Code *string `type:"string"` + ErrorDocument *ErrorDocument `type:"structure"` - Key *string `min:"1" type:"string"` + IndexDocument *IndexDocument `type:"structure"` - Message *string `type:"string"` + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - VersionId *string `type:"string"` + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } // String returns the string representation -func (s Error) String() string { +func (s GetBucketWebsiteOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Error) GoString() string { +func (s GetBucketWebsiteOutput) GoString() string { return s.String() } -// SetCode sets the Code field's value. -func (s *Error) SetCode(v string) *Error { - s.Code = &v +// SetErrorDocument sets the ErrorDocument field's value. 
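// Reading website configuration with GetBucketWebsite (assumed svc client;
// every output field is optional, so nil checks are required):
//
//    out, err := svc.GetBucketWebsite(&s3.GetBucketWebsiteInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err != nil {
//        return err
//    }
//    if out.IndexDocument != nil {
//        fmt.Println("index:", aws.StringValue(out.IndexDocument.Suffix))
//    }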
+func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v return s } -// SetKey sets the Key field's value. -func (s *Error) SetKey(v string) *Error { - s.Key = &v +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v return s } -// SetMessage sets the Message field's value. -func (s *Error) SetMessage(v string) *Error { - s.Message = &v +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v return s } -// SetVersionId sets the VersionId field's value. -func (s *Error) SetVersionId(v string) *Error { - s.VersionId = &v +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument -type ErrorDocument struct { +type GetObjectAclInput struct { _ struct{} `type:"structure"` - // The object key name to use when a 4XX class error occurs. - // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Key is a required field - Key *string `min:"1" type:"string" required:"true"` + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s ErrorDocument) String() string { +func (s GetObjectAclInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ErrorDocument) GoString() string { +func (s GetObjectAclInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ErrorDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -9373,74 +12682,180 @@ func (s *ErrorDocument) Validate() error { return nil } +// SetBucket sets the Bucket field's value. +func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. 
-func (s *ErrorDocument) SetKey(v string) *ErrorDocument { +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { s.Key = &v return s } -// Container for key value pair that defines the criteria for the filter rule. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule -type FilterRule struct { +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +type GetObjectAclOutput struct { _ struct{} `type:"structure"` - // Object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. - // Overlapping prefixes and suffixes are not supported. For more information, - // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - Name *string `type:"string" enum:"FilterRuleName"` + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - Value *string `type:"string"` + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } // String returns the string representation -func (s FilterRule) String() string { +func (s GetObjectAclOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FilterRule) GoString() string { +func (s GetObjectAclOutput) GoString() string { return s.String() } -// SetName sets the Name field's value. -func (s *FilterRule) SetName(v string) *FilterRule { - s.Name = &v +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v return s } -// SetValue sets the Value field's value. -func (s *FilterRule) SetValue(v string) *FilterRule { - s.Value = &v +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest -type GetBucketAccelerateConfigurationInput struct { +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +type GetObjectInput struct { _ struct{} `type:"structure"` - // Name of the bucket for which the accelerate configuration is retrieved. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). 
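// Fetching an object's ACL with GetObjectAcl (the svc client plus bucket and
// key names are assumed/illustrative):
//
//    out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
//        Bucket: aws.String("example-bucket"),
//        Key:    aws.String("path/to/object"),
//    })
//    if err != nil {
//        return err
//    }
//    for _, g := range out.Grants {
//        fmt.Println(aws.StringValue(g.Grantee.Type), aws.StringValue(g.Permission))
//    }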
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
+ // Useful for downloading just a part of an object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified byte range of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s GetBucketAccelerateConfigurationInput) String() string { +func (s GetObjectInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketAccelerateConfigurationInput) GoString() string { +func (s GetObjectInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} +func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9449,137 +12864,180 @@ func (s *GetBucketAccelerateConfigurationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { +func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput -type GetBucketAccelerateConfigurationOutput struct { - _ struct{} `type:"structure"` - - // The accelerate configuration of the bucket. - Status *string `type:"string" enum:"BucketAccelerateStatus"` +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s } -// String returns the string representation -func (s GetBucketAccelerateConfigurationOutput) String() string { - return awsutil.Prettify(s) +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s } -// GoString returns the string representation -func (s GetBucketAccelerateConfigurationOutput) GoString() string { - return s.String() +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s } -// SetStatus sets the Status field's value. -func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { - s.Status = &v +// SetPartNumber sets the PartNumber field's value. 
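// A ranged-read sketch using the Range field above (assumed svc client and an
// ioutil import; the range syntax is standard HTTP):
//
//    out, err := svc.GetObject(&s3.GetObjectInput{
//        Bucket: aws.String("example-bucket"),
//        Key:    aws.String("path/to/object"),
//        Range:  aws.String("bytes=0-1023"), // first KiB of the object
//    })
//    if err != nil {
//        return err
//    }
//    defer out.Body.Close()
//    head, err := ioutil.ReadAll(out.Body)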
+func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest -type GetBucketAclInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s } -// String returns the string representation -func (s GetBucketAclInput) String() string { - return awsutil.Prettify(s) +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s } -// GoString returns the string representation -func (s GetBucketAclInput) GoString() string { - return s.String() +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { - s.Bucket = &v +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput -type GetBucketAclOutput struct { - _ struct{} `type:"structure"` +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} - Owner *Owner `type:"structure"` +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s } -// String returns the string representation -func (s GetBucketAclOutput) String() string { - return awsutil.Prettify(s) +// SetSSECustomerKey sets the SSECustomerKey field's value. 
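// For SSE-C objects, the GET must present the same customer key used at
// upload. A hedged sketch (this vendor tree's s3 SSE customizations are
// expected to base64-encode the key and derive the key-MD5 header):
//
//    out, err := svc.GetObject(&s3.GetObjectInput{
//        Bucket:               aws.String("example-bucket"),
//        Key:                  aws.String("path/to/object"),
//        SSECustomerAlgorithm: aws.String("AES256"),
//        SSECustomerKey:       aws.String(string(rawKey)), // rawKey: illustrative 32-byte key
//    })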
+func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s } -// GoString returns the string representation -func (s GetBucketAclOutput) GoString() string { - return s.String() +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey } -// SetGrants sets the Grants field's value. -func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { - s.Grants = v +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v return s } -// SetOwner sets the Owner field's value. -func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { - s.Owner = v +// SetVersionId sets the VersionId field's value. +func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest -type GetBucketAnalyticsConfigurationInput struct { +type GetObjectLegalHoldInput struct { _ struct{} `type:"structure"` - // The name of the bucket from which an analytics configuration is retrieved. + // The bucket containing the object whose Legal Hold status you want to retrieve. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The key name for the object whose Legal Hold status you want to retrieve. // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object whose Legal Hold status you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s GetBucketAnalyticsConfigurationInput) String() string { +func (s GetObjectLegalHoldInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketAnalyticsConfigurationInput) GoString() string { +func (s GetObjectLegalHoldInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
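// Querying an object's Legal Hold status (assumed svc client; the output's
// LegalHold is an ObjectLockLegalHold whose Status is "ON" or "OFF"):
//
//    out, err := svc.GetObjectLegalHold(&s3.GetObjectLegalHoldInput{
//        Bucket: aws.String("example-bucket"),
//        Key:    aws.String("path/to/object"),
//    })
//    if err != nil {
//        return err
//    }
//    fmt.Println(aws.StringValue(out.LegalHold.Status))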
-func (s *GetBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} +func (s *GetObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -9589,65 +13047,87 @@ func (s *GetBucketAnalyticsConfigurationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { +func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { s.Bucket = &v return s } -// SetId sets the Id field's value. -func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { - s.Id = &v +func (s *GetObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { + s.Key = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput -type GetBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { + s.RequestPayer = &v + return s +} - // The configuration and any analyses for the analytics filter. - AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +// SetVersionId sets the VersionId field's value. +func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { + s.VersionId = &v + return s +} + +type GetObjectLegalHoldOutput struct { + _ struct{} `type:"structure" payload:"LegalHold"` + + // The current Legal Hold status for the specified object. + LegalHold *ObjectLockLegalHold `type:"structure"` } // String returns the string representation -func (s GetBucketAnalyticsConfigurationOutput) String() string { +func (s GetObjectLegalHoldOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketAnalyticsConfigurationOutput) GoString() string { +func (s GetObjectLegalHoldOutput) GoString() string { return s.String() } -// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. -func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { - s.AnalyticsConfiguration = v +// SetLegalHold sets the LegalHold field's value. +func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { + s.LegalHold = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest -type GetBucketCorsInput struct { +type GetObjectLockConfigurationInput struct { _ struct{} `type:"structure"` + // The bucket whose Object Lock configuration you want to retrieve. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s GetBucketCorsInput) String() string { +func (s GetObjectLockConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketCorsInput) GoString() string { +func (s GetObjectLockConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} +func (s *GetObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9656,315 +13136,400 @@ func (s *GetBucketCorsInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { +func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput -type GetBucketCorsOutput struct { - _ struct{} `type:"structure"` +func (s *GetObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +type GetObjectLockConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + + // The specified bucket's Object Lock configuration. + ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` } // String returns the string representation -func (s GetBucketCorsOutput) String() string { +func (s GetObjectLockConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketCorsOutput) GoString() string { +func (s GetObjectLockConfigurationOutput) GoString() string { return s.String() } -// SetCORSRules sets the CORSRules field's value. -func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { - s.CORSRules = v - return s -} +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { + s.ObjectLockConfiguration = v + return s +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. 
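// Reading a bucket's Object Lock configuration (assumed svc client; a bucket
// created with Object Lock enabled reports ObjectLockEnabled as "Enabled"):
//
//    out, err := svc.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err != nil {
//        return err
//    }
//    fmt.Println(aws.StringValue(out.ObjectLockConfiguration.ObjectLockEnabled))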
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // Indicates whether this object has an active legal hold. This field is only + // returned if you have permission to view an object's legal hold status. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode currently in place for this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when this object's Object Lock will expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest -type GetBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` - // The name of the bucket containing the inventory configuration to retrieve. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -// String returns the string representation -func (s GetBucketInventoryConfigurationInput) String() string { - return awsutil.Prettify(s) -} + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` -// GoString returns the string representation -func (s GetBucketInventoryConfigurationInput) GoString() string { - return s.String() -} + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` -// SetBucket sets the Bucket field's value. -func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { - s.Bucket = &v - return s -} + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` -// SetId sets the Id field's value. -func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { - s.Id = &v - return s -} + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput -type GetBucketInventoryConfigurationOutput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` + // The number of tags, if any, on the object. 
+ TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` - // Specifies the inventory configuration. - InventoryConfiguration *InventoryConfiguration `type:"structure"` + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } // String returns the string representation -func (s GetBucketInventoryConfigurationOutput) String() string { +func (s GetObjectOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketInventoryConfigurationOutput) GoString() string { +func (s GetObjectOutput) GoString() string { return s.String() } -// SetInventoryConfiguration sets the InventoryConfiguration field's value. -func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { - s.InventoryConfiguration = v +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { + s.AcceptRanges = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest -type GetBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetBody sets the Body field's value. +func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s } -// String returns the string representation -func (s GetBucketLifecycleConfigurationInput) String() string { - return awsutil.Prettify(s) +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s } -// GoString returns the string representation -func (s GetBucketLifecycleConfigurationInput) GoString() string { - return s.String() +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLifecycleConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetContentEncoding sets the ContentEncoding field's value. +func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { - s.Bucket = &v +// SetContentLanguage sets the ContentLanguage field's value. 
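// A streaming-download sketch against the Body field above (assumed svc
// client; io and os imports assumed):
//
//    out, err := svc.GetObject(&s3.GetObjectInput{
//        Bucket: aws.String("example-bucket"),
//        Key:    aws.String("path/to/object"),
//    })
//    if err != nil {
//        return err
//    }
//    defer out.Body.Close()
//    if _, err := io.Copy(os.Stdout, out.Body); err != nil {
//        return err
//    }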
+func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput -type GetBucketLifecycleConfigurationOutput struct { - _ struct{} `type:"structure"` - - Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +// SetContentLength sets the ContentLength field's value. +func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s } -// String returns the string representation -func (s GetBucketLifecycleConfigurationOutput) String() string { - return awsutil.Prettify(s) +// SetContentRange sets the ContentRange field's value. +func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s } -// GoString returns the string representation -func (s GetBucketLifecycleConfigurationOutput) GoString() string { - return s.String() +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s } -// SetRules sets the Rules field's value. -func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { - s.Rules = v +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest -type GetBucketLifecycleInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s } -// String returns the string representation -func (s GetBucketLifecycleInput) String() string { - return awsutil.Prettify(s) +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s } -// GoString returns the string representation -func (s GetBucketLifecycleInput) GoString() string { - return s.String() +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { - s.Bucket = &v +// SetMetadata sets the Metadata field's value. 
+func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput -type GetBucketLifecycleOutput struct { - _ struct{} `type:"structure"` - - Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s } -// String returns the string representation -func (s GetBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s } -// GoString returns the string representation -func (s GetBucketLifecycleOutput) GoString() string { - return s.String() +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { + s.ObjectLockMode = &v + return s } -// SetRules sets the Rules field's value. -func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { - s.Rules = v +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { + s.ObjectLockRetainUntilDate = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest -type GetBucketLocationInput struct { - _ struct{} `type:"structure"` +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s } -// String returns the string representation -func (s GetBucketLocationInput) String() string { - return awsutil.Prettify(s) +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s } -// GoString returns the string representation -func (s GetBucketLocationInput) GoString() string { - return s.String() +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLocationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s } -// SetBucket sets the Bucket field's value. 
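
// A minimal consumption sketch for GetObjectOutput, assuming an *s3.S3 client
// named svc built elsewhere; the bucket and key names are placeholders. The
// Body stream should be drained and closed so the underlying HTTP connection
// can be reused:
//
//	out, err := svc.GetObject((&s3.GetObjectInput{}).
//		SetBucket("example-bucket").
//		SetKey("example-key"))
//	if err != nil {
//		return err
//	}
//	defer out.Body.Close()
//	n, err := io.Copy(ioutil.Discard, out.Body) // needs "io" and "io/ioutil"
//	if err != nil {
//		return err
//	}
//	fmt.Printf("read %d of %d bytes\n", n, aws.Int64Value(out.ContentLength))
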
-func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { - s.Bucket = &v +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput -type GetBucketLocationOutput struct { - _ struct{} `type:"structure"` +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} - LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s } -// String returns the string representation -func (s GetBucketLocationOutput) String() string { - return awsutil.Prettify(s) +// SetTagCount sets the TagCount field's value. +func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s } -// GoString returns the string representation -func (s GetBucketLocationOutput) GoString() string { - return s.String() +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s } -// SetLocationConstraint sets the LocationConstraint field's value. -func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { - s.LocationConstraint = &v +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest -type GetBucketLoggingInput struct { +type GetObjectRetentionInput struct { _ struct{} `type:"structure"` + // The bucket containing the object whose retention settings you want to retrieve. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The key name for the object whose retention settings you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID for the object whose retention settings you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s GetBucketLoggingInput) String() string { +func (s GetObjectRetentionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketLoggingInput) GoString() string { +func (s GetObjectRetentionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetBucketLoggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} +func (s *GetObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9973,67 +13538,95 @@ func (s *GetBucketLoggingInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { +func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput -type GetBucketLoggingOutput struct { - _ struct{} `type:"structure"` +func (s *GetObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - LoggingEnabled *LoggingEnabled `type:"structure"` +// SetKey sets the Key field's value. +func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v + return s +} + +type GetObjectRetentionOutput struct { + _ struct{} `type:"structure" payload:"Retention"` + + // The container element for an object's retention settings. + Retention *ObjectLockRetention `type:"structure"` } // String returns the string representation -func (s GetBucketLoggingOutput) String() string { +func (s GetObjectRetentionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketLoggingOutput) GoString() string { +func (s GetObjectRetentionOutput) GoString() string { return s.String() } -// SetLoggingEnabled sets the LoggingEnabled field's value. -func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { - s.LoggingEnabled = v +// SetRetention sets the Retention field's value. +func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { + s.Retention = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest -type GetBucketMetricsConfigurationInput struct { +type GetObjectTaggingInput struct { _ struct{} `type:"structure"` - // The name of the bucket containing the metrics configuration to retrieve. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The ID used to identify the metrics configuration. 
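
// A minimal usage sketch for the GetObjectRetention types above, assuming an
// *s3.S3 client named svc and a placeholder bucket that has Object Lock
// enabled:
//
//	out, err := svc.GetObjectRetention((&s3.GetObjectRetentionInput{}).
//		SetBucket("example-locked-bucket").
//		SetKey("example-key"))
//	if err != nil {
//		return err
//	}
//	if r := out.Retention; r != nil {
//		fmt.Println(aws.StringValue(r.Mode), aws.TimeValue(r.RetainUntilDate))
//	}
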
- // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s GetBucketMetricsConfigurationInput) String() string { +func (s GetObjectTaggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketMetricsConfigurationInput) GoString() string { +func (s GetObjectTaggingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -10043,67 +13636,102 @@ func (s *GetBucketMetricsConfigurationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { s.Bucket = &v return s } -// SetId sets the Id field's value. -func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { - s.Id = &v +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput -type GetBucketMetricsConfigurationOutput struct { - _ struct{} `type:"structure" payload:"MetricsConfiguration"` +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} - // Specifies the metrics configuration. - MetricsConfiguration *MetricsConfiguration `type:"structure"` +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } // String returns the string representation -func (s GetBucketMetricsConfigurationOutput) String() string { +func (s GetObjectTaggingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketMetricsConfigurationOutput) GoString() string { +func (s GetObjectTaggingOutput) GoString() string { return s.String() } -// SetMetricsConfiguration sets the MetricsConfiguration field's value. 
-func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { - s.MetricsConfiguration = v +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest -type GetBucketNotificationConfigurationRequest struct { +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +type GetObjectTorrentInput struct { _ struct{} `type:"structure"` - // Name of the bucket to get the notification configuration for. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } // String returns the string representation -func (s GetBucketNotificationConfigurationRequest) String() string { +func (s GetObjectTorrentInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketNotificationConfigurationRequest) GoString() string { +func (s GetObjectTorrentInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketNotificationConfigurationRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10112,96 +13740,91 @@ func (s *GetBucketNotificationConfigurationRequest) Validate() error { } // SetBucket sets the Bucket field's value. 
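
// A minimal usage sketch for the GetObjectTagging types above, assuming an
// *s3.S3 client named svc; the bucket and key names are placeholders:
//
//	out, err := svc.GetObjectTagging((&s3.GetObjectTaggingInput{}).
//		SetBucket("example-bucket").
//		SetKey("example-key"))
//	if err != nil {
//		return err
//	}
//	for _, tag := range out.TagSet {
//		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
//	}
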
-func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { +func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest -type GetBucketPolicyInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} +func (s *GetObjectTorrentInput) getBucket() (v string) { if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) + return v } + return *s.Bucket +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { - s.Bucket = &v +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput -type GetBucketPolicyOutput struct { - _ struct{} `type:"structure" payload:"Policy"` +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` - // The bucket policy as a JSON document. - Policy *string `type:"string"` + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } // String returns the string representation -func (s GetBucketPolicyOutput) String() string { +func (s GetObjectTorrentOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketPolicyOutput) GoString() string { +func (s GetObjectTorrentOutput) GoString() string { return s.String() } -// SetPolicy sets the Policy field's value. -func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { - s.Policy = &v +// SetBody sets the Body field's value. +func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest -type GetBucketReplicationInput struct { +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +type GetPublicAccessBlockInput struct { _ struct{} `type:"structure"` + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s GetBucketReplicationInput) String() string { +func (s GetPublicAccessBlockInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketReplicationInput) GoString() string { +func (s GetPublicAccessBlockInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} +func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10210,59 +13833,66 @@ func (s *GetBucketReplicationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { +func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput -type GetBucketReplicationOutput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` +func (s *GetPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. - ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` } // String returns the string representation -func (s GetBucketReplicationOutput) String() string { +func (s GetPublicAccessBlockOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketReplicationOutput) GoString() string { +func (s GetPublicAccessBlockOutput) GoString() string { return s.String() } -// SetReplicationConfiguration sets the ReplicationConfiguration field's value. -func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { - s.ReplicationConfiguration = v +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest -type GetBucketRequestPaymentInput struct { +type GlacierJobParameters struct { _ struct{} `type:"structure"` - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Glacier retrieval tier at which the restore will be processed. 
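
// A minimal usage sketch for the GetPublicAccessBlock types above, assuming
// an *s3.S3 client named svc and a placeholder bucket name. Note that S3
// responds with an error, not an empty configuration, when no
// PublicAccessBlock has been set on the bucket:
//
//	out, err := svc.GetPublicAccessBlock((&s3.GetPublicAccessBlockInput{}).
//		SetBucket("example-bucket"))
//	if err != nil {
//		return err
//	}
//	if cfg := out.PublicAccessBlockConfiguration; cfg != nil {
//		fmt.Println(aws.BoolValue(cfg.BlockPublicAcls), aws.BoolValue(cfg.RestrictPublicBuckets))
//	}
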
+ // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` } // String returns the string representation -func (s GetBucketRequestPaymentInput) String() string { +func (s GlacierJobParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketRequestPaymentInput) GoString() string { +func (s GlacierJobParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketRequestPaymentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) } if invalidParams.Len() > 0 { @@ -10271,59 +13901,38 @@ func (s *GetBucketRequestPaymentInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { - s.Bucket = &v +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput -type GetBucketRequestPaymentOutput struct { +type Grant struct { _ struct{} `type:"structure"` - // Specifies who pays for the download and request fees. - Payer *string `type:"string" enum:"Payer"` -} - -// String returns the string representation -func (s GetBucketRequestPaymentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketRequestPaymentOutput) GoString() string { - return s.String() -} - -// SetPayer sets the Payer field's value. -func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { - s.Payer = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest -type GetBucketTaggingInput struct { - _ struct{} `type:"structure"` + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` } // String returns the string representation -func (s GetBucketTaggingInput) String() string { +func (s Grant) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketTaggingInput) GoString() string { +func (s Grant) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
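
// A minimal sketch of where GlacierJobParameters fits, assuming an *s3.S3
// client named svc: it is supplied through a RestoreRequest on a
// RestoreObject call, and the bucket/key names here are placeholders:
//
//	_, err := svc.RestoreObject((&s3.RestoreObjectInput{}).
//		SetBucket("example-bucket").
//		SetKey("example-archived-key").
//		SetRestoreRequest((&s3.RestoreRequest{}).
//			SetDays(1).
//			SetGlacierJobParameters((&s3.GlacierJobParameters{}).
//				SetTier(s3.TierStandard))))
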
-func (s *GetBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -10332,59 +13941,54 @@ func (s *GetBucketTaggingInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { - s.Bucket = &v +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput -type GetBucketTaggingOutput struct { - _ struct{} `type:"structure"` - - // TagSet is a required field - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s } -// String returns the string representation -func (s GetBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` -// GoString returns the string representation -func (s GetBucketTaggingOutput) GoString() string { - return s.String() -} + // Screen name of the grantee. + DisplayName *string `type:"string"` -// SetTagSet sets the TagSet field's value. -func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { - s.TagSet = v - return s -} + // Email address of the grantee. + EmailAddress *string `type:"string"` -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest -type GetBucketVersioningInput struct { - _ struct{} `type:"structure"` + // The canonical user ID of the grantee. + ID *string `type:"string"` - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` } // String returns the string representation -func (s GetBucketVersioningInput) String() string { +func (s Grantee) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketVersioningInput) GoString() string { +func (s Grantee) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketVersioningInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } if invalidParams.Len() > 0 { @@ -10393,49 +13997,37 @@ func (s *GetBucketVersioningInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. 
-func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { - s.Bucket = &v +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput -type GetBucketVersioningOutput struct { - _ struct{} `type:"structure"` - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA - // delete. If the bucket has never been so configured, this element is not returned. - MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` - - // The versioning state of the bucket. - Status *string `type:"string" enum:"BucketVersioningStatus"` -} - -// String returns the string representation -func (s GetBucketVersioningOutput) String() string { - return awsutil.Prettify(s) +// SetEmailAddress sets the EmailAddress field's value. +func (s *Grantee) SetEmailAddress(v string) *Grantee { + s.EmailAddress = &v + return s } -// GoString returns the string representation -func (s GetBucketVersioningOutput) GoString() string { - return s.String() +// SetID sets the ID field's value. +func (s *Grantee) SetID(v string) *Grantee { + s.ID = &v + return s } -// SetMFADelete sets the MFADelete field's value. -func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { - s.MFADelete = &v +// SetType sets the Type field's value. +func (s *Grantee) SetType(v string) *Grantee { + s.Type = &v return s } -// SetStatus sets the Status field's value. -func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { - s.Status = &v +// SetURI sets the URI field's value. +func (s *Grantee) SetURI(v string) *Grantee { + s.URI = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest -type GetBucketWebsiteInput struct { +type HeadBucketInput struct { _ struct{} `type:"structure"` // Bucket is a required field @@ -10443,21 +14035,24 @@ type GetBucketWebsiteInput struct { } // String returns the string representation -func (s GetBucketWebsiteInput) String() string { +func (s HeadBucketInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketWebsiteInput) GoString() string { +func (s HeadBucketInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} +func (s *HeadBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10466,94 +14061,111 @@ func (s *GetBucketWebsiteInput) Validate() error { } // SetBucket sets the Bucket field's value. 
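
// A minimal construction sketch for the Grant and Grantee types above; the
// canonical user ID is a placeholder. Grantee.Validate enforces that Type is
// set, so Validate is worth calling before sending an ACL request:
//
//	grant := (&s3.Grant{}).
//		SetGrantee((&s3.Grantee{}).
//			SetType(s3.TypeCanonicalUser).
//			SetID("example-canonical-user-id")).
//		SetPermission(s3.PermissionRead)
//	if err := grant.Validate(); err != nil {
//		return err
//	}
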
-func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { +func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput -type GetBucketWebsiteOutput struct { - _ struct{} `type:"structure"` - - ErrorDocument *ErrorDocument `type:"structure"` - - IndexDocument *IndexDocument `type:"structure"` - - RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - - RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` -} - -// String returns the string representation -func (s GetBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketWebsiteOutput) GoString() string { - return s.String() -} - -// SetErrorDocument sets the ErrorDocument field's value. -func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { - s.ErrorDocument = v - return s +func (s *HeadBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetIndexDocument sets the IndexDocument field's value. -func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { - s.IndexDocument = v - return s +type HeadBucketOutput struct { + _ struct{} `type:"structure"` } -// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. -func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { - s.RedirectAllRequestsTo = v - return s +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) } -// SetRoutingRules sets the RoutingRules field's value. -func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { - s.RoutingRules = v - return s +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest -type GetObjectAclInput struct { +type HeadObjectInput struct { _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. 
+	// Useful for querying the size of the part and the number of parts in this
+	// object.
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+	// Downloads the specified range bytes of an object. For more information about
+	// the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+	Range *string `location:"header" locationName:"Range" type:"string"`
+
 	// Confirms that the requester knows that she or he will be charged for the
 	// request. Bucket owners need not specify this parameter in their requests.
 	// Documentation on downloading objects from requester pays buckets can be found
 	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
 	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
 	// VersionId used to reference a specific version of the object.
 	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
 }

 // String returns the string representation
-func (s GetObjectAclInput) String() string {
+func (s HeadObjectInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetObjectAclInput) GoString() string {
+func (s HeadObjectInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *GetObjectAclInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"}
+func (s *HeadObjectInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
 	if s.Key == nil {
 		invalidParams.Add(request.NewErrParamRequired("Key"))
 	}
@@ -10568,610 +14180,813 @@ func (s *GetObjectAclInput) Validate() error {
 }

 // SetBucket sets the Bucket field's value.
-func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput {
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
 	s.Bucket = &v
 	return s
 }

-// SetKey sets the Key field's value.
-func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput {
-	s.Key = &v
+func (s *HeadObjectInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { + s.IfMatch = &v return s } -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { - s.RequestPayer = &v +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { + s.IfModifiedSince = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { - s.VersionId = &v +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { + s.IfNoneMatch = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput -type GetObjectAclOutput struct { - _ struct{} `type:"structure"` +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} - Owner *Owner `type:"structure"` +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s } -// String returns the string representation -func (s GetObjectAclOutput) String() string { - return awsutil.Prettify(s) +// SetRequestPayer sets the RequestPayer field's value. +func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s } -// GoString returns the string representation -func (s GetObjectAclOutput) GoString() string { - return s.String() +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s } -// SetGrants sets the Grants field's value. -func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { - s.Grants = v +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v return s } -// SetOwner sets the Owner field's value. -func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { - s.Owner = v +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v return s } -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { - s.RequestCharged = &v +// SetVersionId sets the VersionId field's value. 
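
// A minimal usage sketch for the HeadObjectInput setters above, assuming an
// *s3.S3 client named svc and an imported "time" package; names are
// placeholders. A failed precondition (304/412) raised by the conditional
// headers surfaces through err:
//
//	out, err := svc.HeadObject((&s3.HeadObjectInput{}).
//		SetBucket("example-bucket").
//		SetKey("example-key").
//		SetIfModifiedSince(time.Now().Add(-24 * time.Hour)))
//	if err != nil {
//		return err
//	}
//	fmt.Println(aws.Int64Value(out.ContentLength), aws.StringValue(out.ETag))
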
+func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest -type GetObjectInput struct { +type HeadObjectOutput struct { _ struct{} `type:"structure"` - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - // Return the object only if its entity tag (ETag) is the same as the one specified, - // otherwise return a 412 (precondition failed). - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Return the object only if it has been modified since the specified time, - // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - // Return the object only if its entity tag (ETag) is different from the one - // specified, otherwise return a 304 (not modified). - IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - // Return the object only if it has not been modified since the specified time, - // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // Part number of the object being read. This is a positive integer between - // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. - // Useful for downloading just a part of an object. - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. - Range *string `location:"header" locationName:"Range" type:"string"` + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. 
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` - // Sets the Cache-Control header of the response. - ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - // Sets the Content-Disposition header of the response - ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` - // Sets the Content-Encoding header of the response. - ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` - // Sets the Content-Language header of the response. - ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // Sets the Content-Type header of the response. - ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - // Sets the Expires header of the response. - ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` + // The Legal Hold status for the specified object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // The Object Lock mode currently in place for this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when this object's Object Lock will expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // The count of parts this object has. 
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. 
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } // String returns the string representation -func (s GetObjectInput) String() string { +func (s HeadObjectOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetObjectInput) GoString() string { +func (s HeadObjectOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { - s.Bucket = &v +// SetCacheControl sets the CacheControl field's value. +func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v return s } -// SetIfMatch sets the IfMatch field's value. -func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { - s.IfMatch = &v +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v return s } -// SetIfModifiedSince sets the IfModifiedSince field's value. -func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { - s.IfModifiedSince = &v +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v return s } -// SetIfNoneMatch sets the IfNoneMatch field's value. -func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { - s.IfNoneMatch = &v +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v return s } -// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. -func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { - s.IfUnmodifiedSince = &v +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v return s } -// SetKey sets the Key field's value. -func (s *GetObjectInput) SetKey(v string) *GetObjectInput { - s.Key = &v +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v return s } -// SetPartNumber sets the PartNumber field's value. -func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { - s.PartNumber = &v +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v return s } -// SetRange sets the Range field's value. -func (s *GetObjectInput) SetRange(v string) *GetObjectInput { - s.Range = &v +// SetETag sets the ETag field's value. 
+func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v return s } -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { - s.RequestPayer = &v +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v return s } -// SetResponseCacheControl sets the ResponseCacheControl field's value. -func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { - s.ResponseCacheControl = &v +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v return s } -// SetResponseContentDisposition sets the ResponseContentDisposition field's value. -func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { - s.ResponseContentDisposition = &v +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v return s } -// SetResponseContentEncoding sets the ResponseContentEncoding field's value. -func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { - s.ResponseContentEncoding = &v +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v return s } -// SetResponseContentLanguage sets the ResponseContentLanguage field's value. -func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { - s.ResponseContentLanguage = &v +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v return s } -// SetResponseContentType sets the ResponseContentType field's value. -func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { - s.ResponseContentType = &v +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { + s.ObjectLockLegalHoldStatus = &v return s } -// SetResponseExpires sets the ResponseExpires field's value. -func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { - s.ResponseExpires = &v +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { + s.ObjectLockMode = &v return s } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { - s.SSECustomerAlgorithm = &v +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { + s.ObjectLockRetainUntilDate = &v return s } -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { - s.SSECustomerKey = &v +// SetPartsCount sets the PartsCount field's value. +func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { + s.PartsCount = &v return s } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { - s.SSECustomerKeyMD5 = &v +// SetReplicationStatus sets the ReplicationStatus field's value. 
+func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { + s.ReplicationStatus = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { - s.VersionId = &v +// SetRequestCharged sets the RequestCharged field's value. +func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { + s.RequestCharged = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput -type GetObjectOutput struct { - _ struct{} `type:"structure" payload:"Body"` - - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - - // Object data. - Body io.ReadCloser `type:"blob"` +// SetRestore sets the Restore field's value. +func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { + s.Restore = &v + return s +} - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} - // The portion of the object returned in the response. - ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. 
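// A minimal usage sketch for the HeadObjectOutput type above (illustrative, not
// part of the vendored file). It assumes a configured default session; the bucket
// and key names are hypothetical. The chained Set* calls are the generated fluent
// setters this diff adds, and the response fields map to the headers documented above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// HeadObject retrieves object metadata without fetching the body.
	input := new(s3.HeadObjectInput).
		SetBucket("example-bucket").   // hypothetical
		SetKey("path/to/object")       // hypothetical
	out, err := svc.HeadObject(input)
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.ContentType), aws.Int64Value(out.ContentLength))
}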
- DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` +type IndexDocument struct { + _ struct{} `type:"structure"` - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL - ETag *string `location:"header" locationName:"ETag" type:"string"` + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + // + // Suffix is a required field + Suffix *string `type:"string" required:"true"` +} - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} - // The date and time at which the object is no longer cacheable. - Expires *string `location:"header" locationName:"Expires" type:"string"` +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} - // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` +// SetSuffix sets the Suffix field's value. +func (s *IndexDocument) SetSuffix(v string) *IndexDocument { + s.Suffix = &v + return s +} - // The count of parts this object has. - PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` +type Initiator struct { + _ struct{} `type:"structure"` - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + // Name of the Principal. + DisplayName *string `type:"string"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. 
+ ID *string `type:"string"` +} - // Provides information about object restoration operation and expiration time - // of the restored object copy. - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` +// SetID sets the ID field's value. +func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +// Describes the serialization format of the object. +type InputSerialization struct { + _ struct{} `type:"structure"` - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + // Describes the serialization of a CSV-encoded object. + CSV *CSVInput `type:"structure"` - // The number of tags, if any, on the object. - TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` + // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default + // Value: NONE. + CompressionType *string `type:"string" enum:"CompressionType"` - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // Specifies JSON as object's input serialization format. + JSON *JSONInput `type:"structure"` - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + // Specifies Parquet as object's input serialization format. + Parquet *ParquetInput `type:"structure"` } // String returns the string representation -func (s GetObjectOutput) String() string { +func (s InputSerialization) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetObjectOutput) GoString() string { +func (s InputSerialization) GoString() string { return s.String() } -// SetAcceptRanges sets the AcceptRanges field's value. 
-func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput {
-	s.AcceptRanges = &v
+// SetCSV sets the CSV field's value.
+func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization {
+	s.CSV = v
 	return s
 }
 
-// SetBody sets the Body field's value.
-func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput {
-	s.Body = v
+// SetCompressionType sets the CompressionType field's value.
+func (s *InputSerialization) SetCompressionType(v string) *InputSerialization {
+	s.CompressionType = &v
 	return s
 }
 
-// SetCacheControl sets the CacheControl field's value.
-func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput {
-	s.CacheControl = &v
+// SetJSON sets the JSON field's value.
+func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization {
+	s.JSON = v
 	return s
 }
 
-// SetContentDisposition sets the ContentDisposition field's value.
-func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput {
-	s.ContentDisposition = &v
+// SetParquet sets the Parquet field's value.
+func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization {
+	s.Parquet = v
 	return s
 }
 
-// SetContentEncoding sets the ContentEncoding field's value.
-func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput {
-	s.ContentEncoding = &v
-	return s
+type InventoryConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Contains information about where to publish the inventory results.
+	//
+	// Destination is a required field
+	Destination *InventoryDestination `type:"structure" required:"true"`
+
+	// Specifies an inventory filter. The inventory only includes objects that meet
+	// the filter's criteria.
+	Filter *InventoryFilter `type:"structure"`
+
+	// The ID used to identify the inventory configuration.
+	//
+	// Id is a required field
+	Id *string `type:"string" required:"true"`
+
+	// Specifies which object version(s) to include in the inventory results.
+	//
+	// IncludedObjectVersions is a required field
+	IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`
+
+	// Specifies whether the inventory is enabled or disabled.
+	//
+	// IsEnabled is a required field
+	IsEnabled *bool `type:"boolean" required:"true"`
+
+	// Contains the optional fields that are included in the inventory results.
+	OptionalFields []*string `locationNameList:"Field" type:"list"`
+
+	// Specifies the schedule for generating inventory results.
+	//
+	// Schedule is a required field
+	Schedule *InventorySchedule `type:"structure" required:"true"`
 }
 
-// SetContentLanguage sets the ContentLanguage field's value.
-func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput {
-	s.ContentLanguage = &v
-	return s
+// String returns the string representation
+func (s InventoryConfiguration) String() string {
+	return awsutil.Prettify(s)
}
 
-// SetContentLength sets the ContentLength field's value.
-func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput {
-	s.ContentLength = &v
-	return s
+// GoString returns the string representation
+func (s InventoryConfiguration) GoString() string {
+	return s.String()
 }
 
-// SetContentRange sets the ContentRange field's value.
-func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput {
-	s.ContentRange = &v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IncludedObjectVersions == nil { + invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions")) + } + if s.IsEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("IsEnabled")) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetContentType sets the ContentType field's value. -func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { - s.ContentType = &v +// SetDestination sets the Destination field's value. +func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration { + s.Destination = v return s } -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { - s.DeleteMarker = &v +// SetFilter sets the Filter field's value. +func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration { + s.Filter = v return s } -// SetETag sets the ETag field's value. -func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { - s.ETag = &v +// SetId sets the Id field's value. +func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration { + s.Id = &v return s } -// SetExpiration sets the Expiration field's value. -func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { - s.Expiration = &v +// SetIncludedObjectVersions sets the IncludedObjectVersions field's value. +func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration { + s.IncludedObjectVersions = &v return s } -// SetExpires sets the Expires field's value. -func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { - s.Expires = &v +// SetIsEnabled sets the IsEnabled field's value. +func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration { + s.IsEnabled = &v return s } -// SetLastModified sets the LastModified field's value. -func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { - s.LastModified = &v +// SetOptionalFields sets the OptionalFields field's value. +func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration { + s.OptionalFields = v return s } -// SetMetadata sets the Metadata field's value. -func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { - s.Metadata = v +// SetSchedule sets the Schedule field's value. +func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { + s.Schedule = v return s } -// SetMissingMeta sets the MissingMeta field's value. 
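// A minimal sketch of assembling the InventoryConfiguration above and applying it
// with PutBucketInventoryConfiguration (illustrative, not part of the vendored file).
// The bucket names and configuration ID are hypothetical; the destination must be a
// bucket ARN. Validate() runs the generated client-side checks before any request.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	cfg := &s3.InventoryConfiguration{
		Id:                     aws.String("daily-all-versions"), // hypothetical ID
		IsEnabled:              aws.Bool(true),
		IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsAll),
		Schedule: &s3.InventorySchedule{
			Frequency: aws.String(s3.InventoryFrequencyDaily),
		},
		Destination: &s3.InventoryDestination{
			S3BucketDestination: &s3.InventoryS3BucketDestination{
				Bucket: aws.String("arn:aws:s3:::example-inventory-results"), // hypothetical
				Format: aws.String(s3.InventoryFormatCsv),
			},
		},
	}
	// Surface missing required fields locally before calling the service.
	if err := cfg.Validate(); err != nil {
		panic(err)
	}
	if _, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
		Bucket:                 aws.String("example-source-bucket"), // hypothetical
		Id:                     cfg.Id,
		InventoryConfiguration: cfg,
	}); err != nil {
		panic(err)
	}
}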
-func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { - s.MissingMeta = &v - return s +type InventoryDestination struct { + _ struct{} `type:"structure"` + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // S3BucketDestination is a required field + S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetPartsCount sets the PartsCount field's value. -func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { - s.PartsCount = &v +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { + s.S3BucketDestination = v return s } -// SetReplicationStatus sets the ReplicationStatus field's value. -func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { - s.ReplicationStatus = &v - return s +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delivered Inventory reports. + SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delivered Inventory reports. + SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` } -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { - s.RequestCharged = &v - return s +// String returns the string representation +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) } -// SetRestore sets the Restore field's value. -func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { - s.Restore = &v - return s +// GoString returns the string representation +func (s InventoryEncryption) GoString() string { + return s.String() } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { - s.SSECustomerAlgorithm = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
-func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { - s.SSECustomerKeyMD5 = &v +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v return s } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { - s.SSEKMSKeyId = &v +// SetSSES3 sets the SSES3 field's value. +func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v return s } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { - s.ServerSideEncryption = &v - return s +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The prefix that an object must have to be included in the inventory results. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` } -// SetStorageClass sets the StorageClass field's value. -func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { - s.StorageClass = &v - return s +// String returns the string representation +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) } -// SetTagCount sets the TagCount field's value. -func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { - s.TagCount = &v - return s +// GoString returns the string representation +func (s InventoryFilter) GoString() string { + return s.String() } -// SetVersionId sets the VersionId field's value. -func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { - s.VersionId = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { - s.WebsiteRedirectLocation = &v +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest -type GetObjectTaggingInput struct { +type InventoryS3BucketDestination struct { _ struct{} `type:"structure"` + // The ID of the account that owns the destination bucket. + AccountId *string `type:"string"` + + // The Amazon resource name (ARN) of the bucket where inventory results will + // be published. + // // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + Bucket *string `type:"string" required:"true"` - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. 
+ Prefix *string `type:"string"` } // String returns the string representation -func (s GetObjectTaggingInput) String() string { +func (s InventoryS3BucketDestination) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetObjectTaggingInput) GoString() string { +func (s InventoryS3BucketDestination) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} +func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -11180,178 +14995,257 @@ func (s *GetObjectTaggingInput) Validate() error { return nil } +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + // SetBucket sets the Bucket field's value. -func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { s.Bucket = &v return s } -// SetKey sets the Key field's value. -func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { - s.Key = &v +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v return s } -// SetVersionId sets the VersionId field's value. -func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { - s.VersionId = &v +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput -type GetObjectTaggingOutput struct { - _ struct{} `type:"structure"` +// SetPrefix sets the Prefix field's value. +func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} - // TagSet is a required field - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +type InventorySchedule struct { + _ struct{} `type:"structure"` - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // Specifies how frequently inventory results are produced. 
+ // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` } // String returns the string representation -func (s GetObjectTaggingOutput) String() string { +func (s InventorySchedule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetObjectTaggingOutput) GoString() string { +func (s InventorySchedule) GoString() string { return s.String() } -// SetTagSet sets the TagSet field's value. -func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { - s.TagSet = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventorySchedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} + if s.Frequency == nil { + invalidParams.Add(request.NewErrParamRequired("Frequency")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetVersionId sets the VersionId field's value. -func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { - s.VersionId = &v +// SetFrequency sets the Frequency field's value. +func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { + s.Frequency = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest -type GetObjectTorrentInput struct { +type JSONInput struct { _ struct{} `type:"structure"` - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The type of JSON. Valid values: Document, Lines. + Type *string `type:"string" enum:"JSONType"` +} - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` +// String returns the string representation +func (s JSONInput) String() string { + return awsutil.Prettify(s) +} - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +// GoString returns the string representation +func (s JSONInput) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *JSONInput) SetType(v string) *JSONInput { + s.Type = &v + return s +} + +type JSONOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual records in the output. + RecordDelimiter *string `type:"string"` } // String returns the string representation -func (s GetObjectTorrentInput) String() string { +func (s JSONOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetObjectTorrentInput) GoString() string { +func (s JSONOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
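// A minimal sketch pairing the JSONInput/JSONOutput types above with
// SelectObjectContent to run an S3 Select query over a JSON-lines object
// (illustrative, not part of the vendored file). The bucket, key, and SQL text
// are hypothetical; results arrive on an event stream as RecordsEvent payloads.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),    // hypothetical
		Key:            aws.String("data/events.jsonl"), // hypothetical
		Expression:     aws.String("SELECT * FROM S3Object s LIMIT 10"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			JSON: &s3.JSONInput{Type: aws.String(s3.JSONTypeLines)},
		},
		OutputSerialization: &s3.OutputSerialization{
			JSON: &s3.JSONOutput{RecordDelimiter: aws.String("\n")},
		},
	})
	if err != nil {
		panic(err)
	}
	defer out.EventStream.Close()
	// Drain the event stream, printing each batch of matched records.
	for event := range out.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			fmt.Print(string(records.Payload))
		}
	}
}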
-func (s *GetObjectTorrentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { + s.RecordDelimiter = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// A container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for the key value pair that defines the criteria for + // the filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` } -// SetBucket sets the Bucket field's value. -func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { - s.Bucket = &v - return s +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) } -// SetKey sets the Key field's value. -func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { - s.Key = &v - return s +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() } -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { - s.RequestPayer = &v +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput -type GetObjectTorrentOutput struct { - _ struct{} `type:"structure" payload:"Body"` +// A container for specifying the configuration for AWS Lambda notifications. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` - Body io.ReadCloser `type:"blob"` + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // A container for object key name filtering rules. For information about key + // name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Lambda cloud function that Amazon S3 + // can invoke when it detects events of the specified type. 
+ // + // LambdaFunctionArn is a required field + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` } // String returns the string representation -func (s GetObjectTorrentOutput) String() string { +func (s LambdaFunctionConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetObjectTorrentOutput) GoString() string { +func (s LambdaFunctionConfiguration) GoString() string { return s.String() } -// SetBody sets the Body field's value. -func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { - s.Body = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { + s.Events = v return s } -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { - s.RequestCharged = &v +// SetFilter sets the Filter field's value. +func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { + s.Filter = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters -type GlacierJobParameters struct { +// SetId sets the Id field's value. +func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { + s.Id = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { + s.LambdaFunctionArn = &v + return s +} + +type LifecycleConfiguration struct { _ struct{} `type:"structure"` - // Glacier retrieval tier at which the restore will be processed. - // - // Tier is a required field - Tier *string `type:"string" required:"true" enum:"Tier"` + // Rules is a required field + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } // String returns the string representation -func (s GlacierJobParameters) String() string { +func (s LifecycleConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GlacierJobParameters) GoString() string { +func (s LifecycleConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
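// A minimal sketch of wiring the LambdaFunctionConfiguration above into a bucket
// notification via PutBucketNotificationConfiguration (illustrative, not part of
// the vendored file). The bucket name and Lambda function ARN are hypothetical
// placeholders; the event constant covers all object-created events.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		NotificationConfiguration: &s3.NotificationConfiguration{
			LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{{
				// Invoke the function for every object-created event; Id is
				// omitted, so Amazon S3 assigns one.
				Events:            []*string{aws.String(s3.EventS3ObjectCreated)},
				LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:example-fn"), // hypothetical
			}},
		},
	})
	if err != nil {
		panic(err)
	}
}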
-func (s *GlacierJobParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} - if s.Tier == nil { - invalidParams.Add(request.NewErrParamRequired("Tier")) +func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -11360,96 +15254,118 @@ func (s *GlacierJobParameters) Validate() error { return nil } -// SetTier sets the Tier field's value. -func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { - s.Tier = &v +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { + s.Rules = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant -type Grant struct { +type LifecycleExpiration struct { _ struct{} `type:"structure"` - Grantee *Grantee `type:"structure"` + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` - // Specifies the permission given to the grantee. - Permission *string `type:"string" enum:"Permission"` + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `type:"boolean"` } // String returns the string representation -func (s Grant) String() string { +func (s LifecycleExpiration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Grant) GoString() string { +func (s LifecycleExpiration) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *Grant) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Grant"} - if s.Grantee != nil { - if err := s.Grantee.Validate(); err != nil { - invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s } -// SetGrantee sets the Grantee field's value. -func (s *Grant) SetGrantee(v *Grantee) *Grant { - s.Grantee = v +// SetDays sets the Days field's value. +func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v return s } -// SetPermission sets the Permission field's value. -func (s *Grant) SetPermission(v string) *Grant { - s.Permission = &v +// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. 
+func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { + s.ExpiredObjectDeleteMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee -type Grantee struct { - _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` +type LifecycleRule struct { + _ struct{} `type:"structure"` - // Screen name of the grantee. - DisplayName *string `type:"string"` + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - // Email address of the grantee. - EmailAddress *string `type:"string"` + Expiration *LifecycleExpiration `type:"structure"` - // The canonical user ID of the grantee. + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified. + Filter *LifecycleRuleFilter `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. ID *string `type:"string"` - // Type of grantee + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. This is + // deprecated; use Filter instead. // - // Type is a required field - Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` - // URI of the grantee group. - URI *string `type:"string"` + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` } // String returns the string representation -func (s Grantee) String() string { +func (s LifecycleRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Grantee) GoString() string { +func (s LifecycleRule) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *Grantee) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Grantee"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -11458,59 +15374,95 @@ func (s *Grantee) Validate() error { return nil } -// SetDisplayName sets the DisplayName field's value. -func (s *Grantee) SetDisplayName(v string) *Grantee { - s.DisplayName = &v +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { + s.AbortIncompleteMultipartUpload = v return s } -// SetEmailAddress sets the EmailAddress field's value. -func (s *Grantee) SetEmailAddress(v string) *Grantee { - s.EmailAddress = &v +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v return s } // SetID sets the ID field's value. -func (s *Grantee) SetID(v string) *Grantee { +func (s *LifecycleRule) SetID(v string) *LifecycleRule { s.ID = &v return s } -// SetType sets the Type field's value. -func (s *Grantee) SetType(v string) *Grantee { - s.Type = &v +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { + s.NoncurrentVersionExpiration = v return s } -// SetURI sets the URI field's value. -func (s *Grantee) SetURI(v string) *Grantee { - s.URI = &v +// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. +func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { + s.NoncurrentVersionTransitions = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s +} + +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest -type HeadBucketInput struct { +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or +// more predicates. The Lifecycle Rule will apply to any object matching all +// of the predicates configured inside the And operator. +type LifecycleRuleAndOperator struct { _ struct{} `type:"structure"` - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the rule + // to apply. 
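// A minimal sketch of building the LifecycleRule above — using a Filter rather
// than the deprecated Prefix field — and applying it with
// PutBucketLifecycleConfiguration (illustrative, not part of the vendored file).
// The bucket name, rule ID, and key prefix are hypothetical.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	rule := &s3.LifecycleRule{
		ID:     aws.String("expire-old-logs"), // hypothetical rule ID
		Status: aws.String(s3.ExpirationStatusEnabled),
		// A Filter must have exactly one of Prefix, Tag, or And specified.
		Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")}, // hypothetical prefix
		Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
	}
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket:                 aws.String("example-bucket"), // hypothetical
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{Rules: []*s3.LifecycleRule{rule}},
	})
	if err != nil {
		panic(err)
	}
}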
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } // String returns the string representation -func (s HeadBucketInput) String() string { +func (s LifecycleRuleAndOperator) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HeadBucketInput) GoString() string { +func (s LifecycleRuleAndOperator) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *HeadBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -11519,109 +15471,57 @@ func (s *HeadBucketInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { - s.Bucket = &v +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput -type HeadBucketOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s HeadBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadBucketOutput) GoString() string { - return s.String() +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest -type HeadObjectInput struct { +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +type LifecycleRuleFilter struct { _ struct{} `type:"structure"` - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Return the object only if its entity tag (ETag) is the same as the one specified, - // otherwise return a 412 (precondition failed). - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` - - // Return the object only if it has been modified since the specified time, - // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` - - // Return the object only if its entity tag (ETag) is different from the one - // specified, otherwise return a 304 (not modified). - IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` - - // Return the object only if it has not been modified since the specified time, - // otherwise return a 412 (precondition failed). 
- IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of the object being read. This is a positive integer between - // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. - // Useful querying about the size of the part and the number of parts in this - // object. - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` - - // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. - Range *string `location:"header" locationName:"Range" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` } // String returns the string representation -func (s HeadObjectInput) String() string { +func (s LifecycleRuleFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HeadObjectInput) GoString() string { +func (s LifecycleRuleFilter) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *HeadObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) +func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -11630,369 +15530,464 @@ func (s *HeadObjectInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { - s.Bucket = &v +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v return s } -// SetIfMatch sets the IfMatch field's value. -func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { - s.IfMatch = &v +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v return s } -// SetIfModifiedSince sets the IfModifiedSince field's value. -func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { - s.IfModifiedSince = &v +// SetTag sets the Tag field's value. +func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v return s } -// SetIfNoneMatch sets the IfNoneMatch field's value. -func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { - s.IfNoneMatch = &v - return s -} +type ListBucketAnalyticsConfigurationsInput struct { + _ struct{} `type:"structure"` -// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. -func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { - s.IfUnmodifiedSince = &v - return s -} + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -// SetKey sets the Key field's value. -func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { - s.Key = &v - return s + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` } -// SetPartNumber sets the PartNumber field's value. -func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { - s.PartNumber = &v - return s +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) String() string { + return awsutil.Prettify(s) } -// SetRange sets the Range field's value. -func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { - s.Range = &v - return s +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { + return s.String() } -// SetRequestPayer sets the RequestPayer field's value. -func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { - s.RequestPayer = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { - s.SSECustomerAlgorithm = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { - s.SSECustomerKey = &v +// SetBucket sets the Bucket field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v return s } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { - s.SSECustomerKeyMD5 = &v - return s +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetVersionId sets the VersionId field's value. -func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { - s.VersionId = &v +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput -type HeadObjectOutput struct { +type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + // The ContinuationToken that represents where this request began. + ContinuationToken *string `type:"string"` - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} - // The language the content is in. 
- ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL - ETag *string `location:"header" locationName:"ETag" type:"string"` +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} - // The date and time at which the object is no longer cacheable. - Expires *string `location:"header" locationName:"Expires" type:"string"` +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `type:"structure"` - // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + // The name of the bucket containing the inventory configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. 
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` +// String returns the string representation +func (s ListBucketInventoryConfigurationsInput) String() string { + return awsutil.Prettify(s) +} - // The count of parts this object has. - PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsInput) GoString() string { + return s.String() +} - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Provides information about object restoration operation and expiration time - // of the restored object copy. - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` +// SetBucket sets the Bucket field's value. +func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. 
- SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // Indicates whether the returned list of inventory configurations is truncated + // in this response. A value of true indicates that the list is truncated. + IsTruncated *bool `type:"boolean"` - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` } // String returns the string representation -func (s HeadObjectOutput) String() string { +func (s ListBucketInventoryConfigurationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HeadObjectOutput) GoString() string { +func (s ListBucketInventoryConfigurationsOutput) GoString() string { return s.String() } -// SetAcceptRanges sets the AcceptRanges field's value. -func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { - s.AcceptRanges = &v +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.ContinuationToken = &v return s } -// SetCacheControl sets the CacheControl field's value. -func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { - s.CacheControl = &v +// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { + s.InventoryConfigurationList = v return s } -// SetContentDisposition sets the ContentDisposition field's value. -func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { - s.ContentDisposition = &v +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { + s.IsTruncated = &v return s } -// SetContentEncoding sets the ContentEncoding field's value. 
-func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { - s.ContentEncoding = &v +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.NextContinuationToken = &v return s } -// SetContentLanguage sets the ContentLanguage field's value. -func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { - s.ContentLanguage = &v - return s +type ListBucketMetricsConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker that is used to continue a metrics configuration listing that + // has been truncated. Use the NextContinuationToken from a previously truncated + // list response to continue the listing. The continuation token is an opaque + // value that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` } -// SetContentLength sets the ContentLength field's value. -func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { - s.ContentLength = &v - return s +// String returns the string representation +func (s ListBucketMetricsConfigurationsInput) String() string { + return awsutil.Prettify(s) } -// SetContentType sets the ContentType field's value. -func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { - s.ContentType = &v - return s +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsInput) GoString() string { + return s.String() } -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { - s.DeleteMarker = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketMetricsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetETag sets the ETag field's value. -func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { - s.ETag = &v +// SetBucket sets the Bucket field's value. +func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { + s.Bucket = &v return s } -// SetExpiration sets the Expiration field's value. -func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { - s.Expiration = &v +func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { + s.ContinuationToken = &v return s } -// SetExpires sets the Expires field's value. 
-func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { - s.Expires = &v - return s +type ListBucketMetricsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The marker that is used as a starting point for this metrics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of metrics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The list of metrics configurations for a bucket. + MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsOutput) String() string { + return awsutil.Prettify(s) } -// SetLastModified sets the LastModified field's value. -func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { - s.LastModified = &v - return s +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsOutput) GoString() string { + return s.String() } -// SetMetadata sets the Metadata field's value. -func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { - s.Metadata = v +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.ContinuationToken = &v return s } -// SetMissingMeta sets the MissingMeta field's value. -func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { - s.MissingMeta = &v +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { + s.IsTruncated = &v return s } -// SetPartsCount sets the PartsCount field's value. -func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { - s.PartsCount = &v +// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { + s.MetricsConfigurationList = v return s } -// SetReplicationStatus sets the ReplicationStatus field's value. -func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { - s.ReplicationStatus = &v +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.NextContinuationToken = &v return s } -// SetRequestCharged sets the RequestCharged field's value. -func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { - s.RequestCharged = &v - return s +type ListBucketsInput struct { + _ struct{} `type:"structure"` } -// SetRestore sets the Restore field's value. 
-func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { - s.Restore = &v - return s +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { - s.SSECustomerAlgorithm = &v - return s +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} +type ListBucketsOutput struct { + _ struct{} `type:"structure"` -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { - s.SSEKMSKeyId = &v - return s + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + Owner *Owner `type:"structure"` } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { - s.ServerSideEncryption = &v - return s +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) } -// SetStorageClass sets the StorageClass field's value. -func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { - s.StorageClass = &v - return s +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() } -// SetVersionId sets the VersionId field's value. -func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { - s.VersionId = &v +// SetBuckets sets the Buckets field's value. +func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { + s.Buckets = v return s } -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { - s.WebsiteRedirectLocation = &v +// SetOwner sets the Owner field's value. +func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument -type IndexDocument struct { +type ListMultipartUploadsInput struct { _ struct{} `type:"structure"` - // A suffix that is appended to a request that is for a directory on the website - // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ - // the data that is returned will be for the object with the key name images/index.html) - // The suffix must not be empty and must not include a slash character. - // - // Suffix is a required field - Suffix *string `type:"string" required:"true"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. 
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` } // String returns the string representation -func (s IndexDocument) String() string { +func (s ListMultipartUploadsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s IndexDocument) GoString() string { +func (s ListMultipartUploadsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *IndexDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} - if s.Suffix == nil { - invalidParams.Add(request.NewErrParamRequired("Suffix")) +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -12001,206 +15996,237 @@ func (s *IndexDocument) Validate() error { return nil } -// SetSuffix sets the Suffix field's value. -func (s *IndexDocument) SetSuffix(v string) *IndexDocument { - s.Suffix = &v +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator -type Initiator struct { - _ struct{} `type:"structure"` +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // Name of the Principal. - DisplayName *string `type:"string"` +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} - // If the principal is an AWS account, it provides the Canonical User ID. If - // the principal is an IAM User, it provides a user ARN value. - ID *string `type:"string"` +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s } -// String returns the string representation -func (s Initiator) String() string { - return awsutil.Prettify(s) +// SetKeyMarker sets the KeyMarker field's value. 
+func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s } -// GoString returns the string representation -func (s Initiator) GoString() string { - return s.String() +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s } -// SetDisplayName sets the DisplayName field's value. -func (s *Initiator) SetDisplayName(v string) *Initiator { - s.DisplayName = &v +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v return s } -// SetID sets the ID field's value. -func (s *Initiator) SetID(v string) *Initiator { - s.ID = &v +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration -type InventoryConfiguration struct { +type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` - // Contains information about where to publish the inventory results. - // - // Destination is a required field - Destination *InventoryDestination `type:"structure" required:"true"` + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` - // Specifies an inventory filter. The inventory only includes objects that meet - // the filter's criteria. - Filter *InventoryFilter `type:"structure"` + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` + Delimiter *string `type:"string"` - // Specifies which object version(s) to included in the inventory results. - // - // IncludedObjectVersions is a required field - IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` - // Specifies whether the inventory is enabled or disabled. - // - // IsEnabled is a required field - IsEnabled *bool `type:"boolean" required:"true"` + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` - // Contains the optional fields that are included in the inventory results. - OptionalFields []*string `locationNameList:"Field" type:"list"` + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. 
The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` - // Specifies the schedule for generating inventory results. - // - // Schedule is a required field - Schedule *InventorySchedule `type:"structure" required:"true"` + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` } // String returns the string representation -func (s InventoryConfiguration) String() string { +func (s ListMultipartUploadsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InventoryConfiguration) GoString() string { +func (s ListMultipartUploadsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventoryConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.IncludedObjectVersions == nil { - invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions")) - } - if s.IsEnabled == nil { - invalidParams.Add(request.NewErrParamRequired("IsEnabled")) - } - if s.Schedule == nil { - invalidParams.Add(request.NewErrParamRequired("Schedule")) - } - if s.Destination != nil { - if err := s.Destination.Validate(); err != nil { - invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) - } - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - if s.Schedule != nil { - if err := s.Schedule.Validate(); err != nil { - invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) - } - } +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v } - return nil + return *s.Bucket } -// SetDestination sets the Destination field's value. -func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration { - s.Destination = v +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v return s } -// SetFilter sets the Filter field's value. -func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration { - s.Filter = v +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v return s } -// SetId sets the Id field's value. -func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration { - s.Id = &v +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v return s } -// SetIncludedObjectVersions sets the IncludedObjectVersions field's value. -func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration { - s.IncludedObjectVersions = &v +// SetIsTruncated sets the IsTruncated field's value. 
+func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v return s } -// SetIsEnabled sets the IsEnabled field's value. -func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration { - s.IsEnabled = &v +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v return s } -// SetOptionalFields sets the OptionalFields field's value. -func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration { - s.OptionalFields = v +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v return s } -// SetSchedule sets the Schedule field's value. -func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { - s.Schedule = v +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination -type InventoryDestination struct { +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { + s.Uploads = v + return s +} + +type ListObjectVersionsInput struct { _ struct{} `type:"structure"` - // Contains the bucket name, file format, bucket owner (optional), and prefix - // (optional) where inventory results are published. - // - // S3BucketDestination is a required field - S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. 
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. + VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` } // String returns the string representation -func (s InventoryDestination) String() string { +func (s ListObjectVersionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InventoryDestination) GoString() string { +func (s ListObjectVersionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *InventoryDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} - if s.S3BucketDestination == nil { - invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) +func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.S3BucketDestination != nil { - if err := s.S3BucketDestination.Validate(); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) - } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -12209,229 +16235,233 @@ func (s *InventoryDestination) Validate() error { return nil } -// SetS3BucketDestination sets the S3BucketDestination field's value. -func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { - s.S3BucketDestination = v +// SetBucket sets the Bucket field's value. +func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { + s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter -type InventoryFilter struct { - _ struct{} `type:"structure"` - - // The prefix that an object must have to be included in the inventory results. - // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` +func (s *ListObjectVersionsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// String returns the string representation -func (s InventoryFilter) String() string { - return awsutil.Prettify(s) +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { + s.Delimiter = &v + return s } -// GoString returns the string representation -func (s InventoryFilter) GoString() string { - return s.String() +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { + s.EncodingType = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventoryFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } +// SetKeyMarker sets the KeyMarker field's value. 
+func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s } // SetPrefix sets the Prefix field's value. -func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { s.Prefix = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination -type InventoryS3BucketDestination struct { +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` - // The ID of the account that owns the destination bucket. - AccountId *string `type:"string"` + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - // The Amazon resource name (ARN) of the bucket where inventory results will - // be published. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. If your results were truncated, you can + // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last Key returned in a truncated response. + KeyMarker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // Use this value for the key marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` - // Specifies the output format of the inventory results. - // - // Format is a required field - Format *string `type:"string" required:"true" enum:"InventoryFormat"` + // Use this value for the next version id marker parameter in a subsequent request. + NextVersionIdMarker *string `type:"string"` - // The prefix that is prepended to all inventory results. Prefix *string `type:"string"` + + VersionIdMarker *string `type:"string"` + + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` } // String returns the string representation -func (s InventoryS3BucketDestination) String() string { +func (s ListObjectVersionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InventoryS3BucketDestination) GoString() string { +func (s ListObjectVersionsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *InventoryS3BucketDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { - s.AccountId = &v +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v return s } -// SetBucket sets the Bucket field's value. -func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { - s.Bucket = &v +// SetDeleteMarkers sets the DeleteMarkers field's value. +func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v return s } -// SetFormat sets the Format field's value. -func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { - s.Format = &v +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v return s } -// SetPrefix sets the Prefix field's value. -func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { - s.Prefix = &v +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule -type InventorySchedule struct { - _ struct{} `type:"structure"` - - // Specifies how frequently inventory results are produced. - // - // Frequency is a required field - Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s } -// String returns the string representation -func (s InventorySchedule) String() string { - return awsutil.Prettify(s) +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s } -// GoString returns the string representation -func (s InventorySchedule) GoString() string { - return s.String() +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventorySchedule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} - if s.Frequency == nil { - invalidParams.Add(request.NewErrParamRequired("Frequency")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetName sets the Name field's value. +func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s } -// SetFrequency sets the Frequency field's value. 
-func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { - s.Frequency = &v +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v return s } -// Container for object key name prefix and suffix filtering rules. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter -type KeyFilter struct { - _ struct{} `type:"structure"` - - // A list of containers for key value pair that defines the criteria for the - // filter rule. - FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s } -// String returns the string representation -func (s KeyFilter) String() string { - return awsutil.Prettify(s) +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s } -// GoString returns the string representation -func (s KeyFilter) GoString() string { - return s.String() +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s } -// SetFilterRules sets the FilterRules field's value. -func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { - s.FilterRules = v +// SetVersions sets the Versions field's value. +func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v return s } -// Container for specifying the AWS Lambda notification configuration. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration -type LambdaFunctionConfiguration struct { +type ListObjectsInput struct { _ struct{} `type:"structure"` - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - Filter *NotificationConfigurationFilter `type:"structure"` + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // Lambda cloud function ARN that Amazon S3 can invoke when it detects events - // of the specified type. 
- // - // LambdaFunctionArn is a required field - LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } // String returns the string representation -func (s LambdaFunctionConfiguration) String() string { +func (s ListObjectsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LambdaFunctionConfiguration) GoString() string { +func (s ListObjectsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *LambdaFunctionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.LambdaFunctionArn == nil { - invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -12440,183 +16470,218 @@ func (s *LambdaFunctionConfiguration) Validate() error { return nil } -// SetEvents sets the Events field's value. -func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { - s.Events = v +// SetBucket sets the Bucket field's value. +func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { + s.Bucket = &v return s } -// SetFilter sets the Filter field's value. -func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { - s.Filter = v +func (s *ListObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { + s.Delimiter = &v return s } -// SetId sets the Id field's value. -func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { - s.Id = &v +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { + s.EncodingType = &v return s } -// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. -func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { - s.LambdaFunctionArn = &v +// SetMarker sets the Marker field's value. 
+func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { + s.Marker = &v return s } -// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { + s.MaxKeys = &v + return s +} -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration -type LifecycleConfiguration struct { +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { + s.RequestPayer = &v + return s +} + +type ListObjectsOutput struct { _ struct{} `type:"structure"` - // Rules is a required field - Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Contents []*Object `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + Marker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // When the response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as the marker in the subsequent + // request to get the next set of objects. Amazon S3 lists objects in alphabetical + // order. Note: this element is returned only if you have the delimiter request + // parameter specified. If the response does not include the NextMarker and it + // is truncated, you can use the value of the last Key in the response as the + // marker in the subsequent request to get the next set of object keys. + NextMarker *string `type:"string"` + + Prefix *string `type:"string"` } // String returns the string representation -func (s LifecycleConfiguration) String() string { +func (s ListObjectsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LifecycleConfiguration) GoString() string { +func (s ListObjectsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { + s.CommonPrefixes = v + return s } -// SetRules sets the Rules field's value. -func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { - s.Rules = v +// SetContents sets the Contents field's value. 
+func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { + s.Contents = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration -type LifecycleExpiration struct { - _ struct{} `type:"structure"` - - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. - Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { + s.Delimiter = &v + return s +} - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int64 `type:"integer"` +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { + s.EncodingType = &v + return s +} - // Indicates whether Amazon S3 will remove a delete marker with no noncurrent - // versions. If set to true, the delete marker will be expired; if set to false - // the policy takes no action. This cannot be specified with Days or Date in - // a Lifecycle Expiration Policy. - ExpiredObjectDeleteMarker *bool `type:"boolean"` +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { + s.IsTruncated = &v + return s } -// String returns the string representation -func (s LifecycleExpiration) String() string { - return awsutil.Prettify(s) +// SetMarker sets the Marker field's value. +func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { + s.Marker = &v + return s } -// GoString returns the string representation -func (s LifecycleExpiration) GoString() string { - return s.String() +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { + s.MaxKeys = &v + return s } -// SetDate sets the Date field's value. -func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { - s.Date = &v +// SetName sets the Name field's value. +func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { + s.Name = &v return s } -// SetDays sets the Days field's value. -func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { - s.Days = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { + s.NextMarker = &v return s } -// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. -func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { - s.ExpiredObjectDeleteMarker = &v +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { + s.Prefix = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule -type LifecycleRule struct { +type ListObjectsV2Input struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // Name of the bucket to list. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - Expiration *LifecycleExpiration `type:"structure"` + // ContinuationToken indicates to Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The Filter is used to identify objects that a Lifecycle Rule applies to. - // A Filter must have exactly one of Prefix, Tag, or And specified. - Filter *LifecycleRuleFilter `type:"structure"` + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + // The owner field is not present in listV2 responses by default. If you want + // the owner field returned with each key in the result, set the fetch owner + // field to true. + FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` - NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - // Prefix identifying one or more objects to which the rule applies. This is - // deprecated; use Filter instead. - Prefix *string `deprecated:"true" type:"string"` + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // Confirms that the requester knows that she or he will be charged for the + // list objects request in V2 style. Bucket owners need not specify this parameter + // in their requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key.
StartAfter can be any key in the bucket + StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` } // String returns the string representation -func (s LifecycleRule) String() string { +func (s ListObjectsV2Input) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LifecycleRule) GoString() string { +func (s ListObjectsV2Input) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) +func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -12625,212 +16690,256 @@ func (s *LifecycleRule) Validate() error { return nil } -// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. -func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { - s.AbortIncompleteMultipartUpload = v +// SetBucket sets the Bucket field's value. +func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { + s.Bucket = &v return s } -// SetExpiration sets the Expiration field's value. -func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { - s.Expiration = v +func (s *ListObjectsV2Input) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { + s.ContinuationToken = &v return s } -// SetFilter sets the Filter field's value. -func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { - s.Filter = v +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { + s.Delimiter = &v return s } -// SetID sets the ID field's value. -func (s *LifecycleRule) SetID(v string) *LifecycleRule { - s.ID = &v +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { + s.EncodingType = &v return s } -// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. -func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { - s.NoncurrentVersionExpiration = v +// SetFetchOwner sets the FetchOwner field's value. +func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { + s.FetchOwner = &v return s } -// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. -func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { - s.NoncurrentVersionTransitions = v +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { + s.MaxKeys = &v return s } // SetPrefix sets the Prefix field's value. 
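// For illustration: a single ListObjectsV2 call exercising the StartAfter and
// FetchOwner fields documented above. All literal values are placeholder
// assumptions.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func listAfter(svc *s3.S3) (*s3.ListObjectsV2Output, error) {
	return svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:     aws.String("example-bucket"),    // required
		StartAfter: aws.String("photos/2019/01/01"), // listing begins after this key
		FetchOwner: aws.Bool(true),                  // owner is omitted from V2 results unless requested
		MaxKeys:    aws.Int64(1000),
	})
}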
-func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { +func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { s.Prefix = &v return s } -// SetStatus sets the Status field's value. -func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { - s.Status = &v +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { + s.RequestPayer = &v return s } -// SetTransitions sets the Transitions field's value. -func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { - s.Transitions = v +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { + s.StartAfter = &v return s } -// This is used in a Lifecycle Rule Filter to apply a logical AND to two or -// more predicates. The Lifecycle Rule will apply to any object matching all -// of the predicates configured inside the And operator. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator -type LifecycleRuleAndOperator struct { - _ struct{} `type:"structure"` +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // ContinuationToken indicates to Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. + ContinuationToken *string `type:"string"` + + // A delimiter is a character you use to group keys. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + // KeyCount is the number of keys returned with this request. KeyCount will + // always be less than or equal to the MaxKeys field. For example, if you ask + // for 50 keys, your result will include 50 keys or fewer. + KeyCount *int64 `type:"integer"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `type:"integer"` + + // Name of the bucket to list. + Name *string `type:"string"` + + // NextContinuationToken is sent when isTruncated is true, which means there + // are more keys in the bucket that can be listed. The next list requests to + // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken + // is obfuscated and is not a real key. + NextContinuationToken *string `type:"string"` + // Limits the response to keys that begin with the specified prefix. Prefix *string `type:"string"` - // All of these tags must exist in the object's tag set in order for the rule - // to apply. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key.
StartAfter can be any key in the bucket + StartAfter *string `type:"string"` } // String returns the string representation -func (s LifecycleRuleAndOperator) String() string { +func (s ListObjectsV2Output) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LifecycleRuleAndOperator) GoString() string { +func (s ListObjectsV2Output) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleRuleAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { + s.CommonPrefixes = v + return s } -// SetPrefix sets the Prefix field's value. -func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { - s.Prefix = &v +// SetContents sets the Contents field's value. +func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { + s.Contents = v return s } -// SetTags sets the Tags field's value. -func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { - s.Tags = v +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { + s.ContinuationToken = &v return s } -// The Filter is used to identify objects that a Lifecycle Rule applies to. -// A Filter must have exactly one of Prefix, Tag, or And specified. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter -type LifecycleRuleFilter struct { - _ struct{} `type:"structure"` - - // This is used in a Lifecycle Rule Filter to apply a logical AND to two or - // more predicates. The Lifecycle Rule will apply to any object matching all - // of the predicates configured inside the And operator. - And *LifecycleRuleAndOperator `type:"structure"` - - // Prefix identifying one or more objects to which the rule applies. - Prefix *string `type:"string"` +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { + s.Delimiter = &v + return s +} - // This tag must exist in the object's tag set in order for the rule to apply. - Tag *Tag `type:"structure"` +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { + s.EncodingType = &v + return s } -// String returns the string representation -func (s LifecycleRuleFilter) String() string { - return awsutil.Prettify(s) +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { + s.IsTruncated = &v + return s } -// GoString returns the string representation -func (s LifecycleRuleFilter) GoString() string { - return s.String() +// SetKeyCount sets the KeyCount field's value. +func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { + s.KeyCount = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
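// A sketch of the ContinuationToken handshake described above: keep reissuing
// the request with the returned NextContinuationToken until IsTruncated is
// false. The bucket name is supplied by the caller.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func countKeys(svc *s3.S3, bucket string) (int64, error) {
	var total int64
	input := &s3.ListObjectsV2Input{Bucket: aws.String(bucket)}
	for {
		out, err := svc.ListObjectsV2(input)
		if err != nil {
			return 0, err
		}
		total += aws.Int64Value(out.KeyCount) // KeyCount is at most MaxKeys
		if !aws.BoolValue(out.IsTruncated) {
			return total, nil
		}
		input.ContinuationToken = out.NextContinuationToken // opaque token, not a real key
	}
}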
-func (s *LifecycleRuleFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { + s.MaxKeys = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetName sets the Name field's value. +func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { + s.Name = &v + return s } -// SetAnd sets the And field's value. -func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { - s.And = v +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { + s.NextContinuationToken = &v return s } // SetPrefix sets the Prefix field's value. -func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { +func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { s.Prefix = &v return s } -// SetTag sets the Tag field's value. -func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { - s.Tag = v +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { + s.StartAfter = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest -type ListBucketAnalyticsConfigurationsInput struct { +type ListPartsInput struct { _ struct{} `type:"structure"` - // The name of the bucket from which analytics configurations are retrieved. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID identifying the multipart upload whose parts are being listed. 
+ // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } // String returns the string representation -func (s ListBucketAnalyticsConfigurationsInput) String() string { +func (s ListPartsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBucketAnalyticsConfigurationsInput) GoString() string { +func (s ListPartsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} +func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } if invalidParams.Len() > 0 { return invalidParams @@ -12839,209 +16948,269 @@ func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { +func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { s.Bucket = &v return s } -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { - s.ContinuationToken = &v +func (s *ListPartsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *ListPartsInput) SetKey(v string) *ListPartsInput { + s.Key = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput -type ListBucketAnalyticsConfigurationsOutput struct { +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { + s.MaxParts = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { + s.UploadId = &v + return s +} + +type ListPartsOutput struct { _ struct{} `type:"structure"` - // The list of analytics configurations for a bucket. - AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // The ContinuationToken that represents where this request began. 
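// Illustrative use of the ListPartsInput type above, walking part-number
// pages with the generated ListPartsPages paginator. Bucket, key, and upload
// ID values are supplied by the caller.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func uploadedBytes(svc *s3.S3, bucket, key, uploadID string) (int64, error) {
	var total int64
	input := &s3.ListPartsInput{
		Bucket:   aws.String(bucket),   // required
		Key:      aws.String(key),      // required, min length 1
		UploadId: aws.String(uploadID), // required
		MaxParts: aws.Int64(1000),
	}
	err := svc.ListPartsPages(input, func(page *s3.ListPartsOutput, lastPage bool) bool {
		for _, p := range page.Parts {
			total += aws.Int64Value(p.Size)
		}
		return true // continue until IsTruncated is false
	})
	return total, err
}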
- ContinuationToken *string `type:"string"` + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + Owner *Owner `type:"structure"` + + // Part number after which listing begins. + PartNumberMarker *int64 `type:"integer"` + + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Indicates whether the returned list of analytics configurations is complete. - // A value of true indicates that the list is not complete and the NextContinuationToken - // will be provided for a subsequent request. - IsTruncated *bool `type:"boolean"` + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` - // NextContinuationToken is sent when isTruncated is true, which indicates that - // there are more analytics configurations to list. The next request must include - // this NextContinuationToken. The token is obfuscated and is not a usable value. - NextContinuationToken *string `type:"string"` + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` } // String returns the string representation -func (s ListBucketAnalyticsConfigurationsOutput) String() string { +func (s ListPartsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { +func (s ListPartsOutput) GoString() string { return s.String() } -// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { - s.AnalyticsConfigurationList = v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { - s.ContinuationToken = &v +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v return s } -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { - s.IsTruncated = &v +// SetAbortRuleId sets the AbortRuleId field's value. 
+func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v return s } -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { - s.NextContinuationToken = &v +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest -type ListBucketInventoryConfigurationsInput struct { - _ struct{} `type:"structure"` - - // The name of the bucket containing the inventory configurations to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The marker used to continue an inventory configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value - // that Amazon S3 understands. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` -} - -// String returns the string representation -func (s ListBucketInventoryConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketInventoryConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketInventoryConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} +func (s *ListPartsOutput) getBucket() (v string) { if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams + return v } - return nil + return *s.Bucket } -// SetBucket sets the Bucket field's value. -func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { - s.Bucket = &v +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v return s } -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { - s.ContinuationToken = &v +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput -type ListBucketInventoryConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // If sent in the request, the marker that is used as a starting point for this - // inventory configuration list response. - ContinuationToken *string `type:"string"` - - // The list of inventory configurations for a bucket. - InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s +} - // Indicates whether the returned list of inventory configurations is truncated - // in this response. 
A value of true indicates that the list is truncated. - IsTruncated *bool `type:"boolean"` +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s +} - // The marker used to continue this inventory configuration listing. Use the - // NextContinuationToken from this response to continue the listing in a subsequent - // request. The continuation token is an opaque value that Amazon S3 understands. - NextContinuationToken *string `type:"string"` +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. +func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s } -// String returns the string representation -func (s ListBucketInventoryConfigurationsOutput) String() string { - return awsutil.Prettify(s) +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s } -// GoString returns the string representation -func (s ListBucketInventoryConfigurationsOutput) GoString() string { - return s.String() +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s } -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { - s.ContinuationToken = &v +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v return s } -// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { - s.InventoryConfigurationList = v +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v return s } -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { - s.IsTruncated = &v +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v return s } -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { - s.NextContinuationToken = &v +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest -type ListBucketMetricsConfigurationsInput struct { +// Describes an S3 location that will receive the results of the restore request. +type Location struct { _ struct{} `type:"structure"` - // The name of the bucket containing the metrics configurations to retrieve. + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. 
// - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // BucketName is a required field + BucketName *string `type:"string" required:"true"` - // The marker that is used to continue a metrics configuration listing that - // has been truncated. Use the NextContinuationToken from a previously truncated - // list response to continue the listing. The continuation token is an opaque - // value that Amazon S3 understands. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Describes the server-side encryption that will be applied to the restore + // results. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` } // String returns the string representation -func (s ListBucketMetricsConfigurationsInput) String() string { +func (s Location) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBucketMetricsConfigurationsInput) GoString() string { +func (s Location) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketMetricsConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -13050,173 +17219,250 @@ func (s *ListBucketMetricsConfigurationsInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { - s.Bucket = &v +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v return s } -// SetContinuationToken sets the ContinuationToken field's value. 
-func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { - s.ContinuationToken = &v +// SetBucketName sets the BucketName field's value. +func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput -type ListBucketMetricsConfigurationsOutput struct { - _ struct{} `type:"structure"` +// SetCannedACL sets the CannedACL field's value. +func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s +} - // The marker that is used as a starting point for this metrics configuration - // list response. This value is present if it was sent in the request. - ContinuationToken *string `type:"string"` +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} - // Indicates whether the returned list of metrics configurations is complete. - // A value of true indicates that the list is not complete and the NextContinuationToken - // will be provided for a subsequent request. - IsTruncated *bool `type:"boolean"` +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} - // The list of metrics configurations for a bucket. - MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} - // The marker used to continue a metrics configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value - // that Amazon S3 understands. - NextContinuationToken *string `type:"string"` +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// Container for logging information. Presence of this element indicates that +// logging is enabled. Parameters TargetBucket and TargetPrefix are required +// in this case. +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. 
+ // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` } // String returns the string representation -func (s ListBucketMetricsConfigurationsOutput) String() string { +func (s LoggingEnabled) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBucketMetricsConfigurationsOutput) GoString() string { +func (s LoggingEnabled) GoString() string { return s.String() } -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { - s.ContinuationToken = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { - s.IsTruncated = &v +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v return s } -// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { - s.MetricsConfigurationList = v +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v return s } -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { - s.NextContinuationToken = &v +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput -type ListBucketsInput struct { +// A metadata key-value pair to store with an object. +type MetadataEntry struct { _ struct{} `type:"structure"` + + Name *string `type:"string"` + + Value *string `type:"string"` } // String returns the string representation -func (s ListBucketsInput) String() string { +func (s MetadataEntry) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBucketsInput) GoString() string { +func (s MetadataEntry) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput -type ListBucketsOutput struct { +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. 
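// For illustration: enabling server access logging with the LoggingEnabled
// type above via PutBucketLogging. Both bucket names are placeholders; a
// distinct TargetPrefix keeps this source bucket's logs distinguishable in a
// shared target bucket.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func enableAccessLogging(svc *s3.S3) error {
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("source-bucket"),
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("log-bucket"),     // required
				TargetPrefix: aws.String("source-bucket/"), // required
			},
		},
	})
	return err
}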
+func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + +type MetricsAndOperator struct { _ struct{} `type:"structure"` - Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` - Owner *Owner `type:"structure"` + // The list of tags used when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } // String returns the string representation -func (s ListBucketsOutput) String() string { +func (s MetricsAndOperator) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBucketsOutput) GoString() string { +func (s MetricsAndOperator) GoString() string { return s.String() } -// SetBuckets sets the Buckets field's value. -func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { - s.Buckets = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v return s } -// SetOwner sets the Owner field's value. -func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { - s.Owner = v +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest -type ListMultipartUploadsInput struct { +type MetricsConfiguration struct { _ struct{} `type:"structure"` - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Together with upload-id-marker, this parameter specifies the multipart upload - // after which listing should begin. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of multipart uploads, from 1 to 1,000, to return - // in the response body. 1,000 is the maximum number of uploads that can be - // returned in a response. - MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` - - // Lists in-progress uploads only for those keys that begin with the specified - // prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + // Specifies a metrics configuration filter. 
The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, a tag, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. - UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` } // String returns the string representation -func (s ListMultipartUploadsInput) String() string { +func (s MetricsConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListMultipartUploadsInput) GoString() string { +func (s MetricsConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListMultipartUploadsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -13225,222 +17471,276 @@ func (s *ListMultipartUploadsInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { - s.Bucket = &v +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v return s } -// SetDelimiter sets the Delimiter field's value. -func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { - s.Delimiter = &v +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v return s } -// SetEncodingType sets the EncodingType field's value. -func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { - s.EncodingType = &v - return s -} +type MetricsFilter struct { + _ struct{} `type:"structure"` -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { - s.KeyMarker = &v - return s -} + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` -// SetMaxUploads sets the MaxUploads field's value. -func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { - s.MaxUploads = &v - return s -} + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` -// SetPrefix sets the Prefix field's value. -func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { - s.Prefix = &v - return s + // The tag used when evaluating a metrics filter. 
+ Tag *Tag `type:"structure"` } -// SetUploadIdMarker sets the UploadIdMarker field's value. -func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { - s.UploadIdMarker = &v - return s +// String returns the string representation +func (s MetricsFilter) String() string { + return awsutil.Prettify(s) } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput -type ListMultipartUploadsOutput struct { - _ struct{} `type:"structure"` +// GoString returns the string representation +func (s MetricsFilter) GoString() string { + return s.String() +} - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - Delimiter *string `type:"string"` +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} - // Indicates whether the returned list of multipart uploads is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of multipart uploads exceeds the limit allowed or specified - // by max uploads. - IsTruncated *bool `type:"boolean"` +// SetTag sets the Tag field's value. +func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} - // The key at or after which the listing began. - KeyMarker *string `type:"string"` +type MultipartUpload struct { + _ struct{} `type:"structure"` - // Maximum number of multipart uploads that could have been included in the - // response. - MaxUploads *int64 `type:"integer"` + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp"` - // When a list is truncated, this element specifies the value that should be - // used for the key-marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` - // When a list is truncated, this element specifies the value that should be - // used for the upload-id-marker request parameter in a subsequent request. - NextUploadIdMarker *string `type:"string"` + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` - // When a prefix is provided in the request, this field contains the specified - // prefix. The result contains only keys starting with the specified prefix. - Prefix *string `type:"string"` + Owner *Owner `type:"structure"` - // Upload ID after which listing began. - UploadIdMarker *string `type:"string"` + // The class of storage used to store the object. 
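// A sketch of the MetricsFilter shapes above: a filter is a prefix, a tag, or
// a MetricsAndOperator conjunction of both. Bucket, Id, and tag values are
// placeholder assumptions.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func putMetricsConfig(svc *s3.S3) error {
	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("docs-metrics"),
		MetricsConfiguration: &s3.MetricsConfiguration{
			Id: aws.String("docs-metrics"), // required
			Filter: &s3.MetricsFilter{
				And: &s3.MetricsAndOperator{ // a conjunction needs at least two predicates
					Prefix: aws.String("documents/"),
					Tags:   []*s3.Tag{{Key: aws.String("class"), Value: aws.String("important")}},
				},
			},
		},
	})
	return err
}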
+ StorageClass *string `type:"string" enum:"StorageClass"` - Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` } // String returns the string representation -func (s ListMultipartUploadsOutput) String() string { +func (s MultipartUpload) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListMultipartUploadsOutput) GoString() string { +func (s MultipartUpload) GoString() string { return s.String() } -// SetBucket sets the Bucket field's value. -func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { - s.Bucket = &v +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v return s } -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { - s.CommonPrefixes = v +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v return s } -// SetDelimiter sets the Delimiter field's value. -func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { - s.Delimiter = &v +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v return s } -// SetEncodingType sets the EncodingType field's value. -func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { - s.EncodingType = &v +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v return s } -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { - s.IsTruncated = &v +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v return s } -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { - s.KeyMarker = &v +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v return s } -// SetMaxUploads sets the MaxUploads field's value. -func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { - s.MaxUploads = &v - return s -} +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` -// SetNextKeyMarker sets the NextKeyMarker field's value. -func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { - s.NextKeyMarker = &v - return s + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. 
For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` } -// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. -func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { - s.NextUploadIdMarker = &v - return s +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) } -// SetPrefix sets the Prefix field's value. -func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { - s.Prefix = &v - return s +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() } -// SetUploadIdMarker sets the UploadIdMarker field's value. -func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { - s.UploadIdMarker = &v +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v return s } -// SetUploads sets the Uploads field's value. -func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { - s.Uploads = v - return s +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER +// storage class. If your bucket is versioning-enabled (or versioning is suspended), +// you can set this action to request that Amazon S3 transition noncurrent object +// versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER storage +// class at a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest -type ListObjectVersionsInput struct { - _ struct{} `type:"structure"` +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` +// SetNoncurrentDays sets the NoncurrentDays field's value. 
+func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} - // Specifies the key to start with when listing objects in a bucket. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` +// A container for specifying the notification configuration of the bucket. +// If this element is empty, notifications are turned off for the bucket. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` - // Specifies the object version you want to start listing from. - VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` } // String returns the string representation -func (s ListObjectVersionsInput) String() string { +func (s NotificationConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectVersionsInput) GoString() string { +func (s NotificationConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListObjectVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -13449,686 +17749,784 @@ func (s *ListObjectVersionsInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { - s.Bucket = &v +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v return s } -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { - s.Delimiter = &v +// SetQueueConfigurations sets the QueueConfigurations field's value. +func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v return s } -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { - s.EncodingType = &v +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v return s } -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { - s.KeyMarker = &v - return s +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` } -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { - s.MaxKeys = &v +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. 
+func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { + s.CloudFunctionConfiguration = v return s } -// SetPrefix sets the Prefix field's value. -func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { - s.Prefix = &v +// SetQueueConfiguration sets the QueueConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.QueueConfiguration = v return s } -// SetVersionIdMarker sets the VersionIdMarker field's value. -func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { - s.VersionIdMarker = &v +// SetTopicConfiguration sets the TopicConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.TopicConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput -type ListObjectVersionsOutput struct { +// A container for object key name filtering rules. For information about key +// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` - - Delimiter *string `type:"string"` + // A container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. If your results were truncated, you can - // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker - // response parameters as a starting place in another request to return the - // rest of the results. - IsTruncated *bool `type:"boolean"` +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} - // Marks the last Key returned in a truncated response. - KeyMarker *string `type:"string"` +// SetKey sets the Key field's value. +func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { + s.Key = v + return s +} - MaxKeys *int64 `type:"integer"` +type Object struct { + _ struct{} `type:"structure"` - Name *string `type:"string"` + ETag *string `type:"string"` - // Use this value for the key marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` + Key *string `min:"1" type:"string"` - // Use this value for the next version id marker parameter in a subsequent request. 
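
A minimal sketch (not part of the vendored patch) of how the nested Validate pattern shown above surfaces indexed errors: an empty QueueConfiguration is assumed to fail its own Validate (its QueueArn and Events fields are required elsewhere in this file), and NotificationConfiguration.Validate wraps that failure with an indexed context such as "QueueConfigurations[0]". All values are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	cfg := &s3.NotificationConfiguration{
		// Deliberately empty entry: required QueueArn and Events are missing,
		// so the nested Validate call fails for index 0.
		QueueConfigurations: []*s3.QueueConfiguration{{}},
	}
	if err := cfg.Validate(); err != nil {
		// The error context includes the slice index, e.g. "QueueConfigurations[0]".
		fmt.Println(err)
	}
}
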
- NextVersionIdMarker *string `type:"string"` + LastModified *time.Time `type:"timestamp"` - Prefix *string `type:"string"` + Owner *Owner `type:"structure"` - VersionIdMarker *string `type:"string"` + Size *int64 `type:"integer"` - Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` } // String returns the string representation -func (s ListObjectVersionsOutput) String() string { +func (s Object) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectVersionsOutput) GoString() string { +func (s Object) GoString() string { return s.String() } -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { - s.CommonPrefixes = v +// SetETag sets the ETag field's value. +func (s *Object) SetETag(v string) *Object { + s.ETag = &v return s } -// SetDeleteMarkers sets the DeleteMarkers field's value. -func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { - s.DeleteMarkers = v +// SetKey sets the Key field's value. +func (s *Object) SetKey(v string) *Object { + s.Key = &v return s } -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { - s.Delimiter = &v +// SetLastModified sets the LastModified field's value. +func (s *Object) SetLastModified(v time.Time) *Object { + s.LastModified = &v return s } -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { - s.EncodingType = &v +// SetOwner sets the Owner field's value. +func (s *Object) SetOwner(v *Owner) *Object { + s.Owner = v return s } -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { - s.IsTruncated = &v +// SetSize sets the Size field's value. +func (s *Object) SetSize(v int64) *Object { + s.Size = &v return s } -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { - s.KeyMarker = &v +// SetStorageClass sets the StorageClass field's value. +func (s *Object) SetStorageClass(v string) *Object { + s.StorageClass = &v return s } -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { - s.MaxKeys = &v - return s -} +type ObjectIdentifier struct { + _ struct{} `type:"structure"` -// SetName sets the Name field's value. -func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { - s.Name = &v - return s + // Key name of the object to delete. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionId *string `type:"string"` } -// SetNextKeyMarker sets the NextKeyMarker field's value. -func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { - s.NextKeyMarker = &v - return s +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) } -// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. 
-func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { - s.NextVersionIdMarker = &v - return s +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() } -// SetPrefix sets the Prefix field's value. -func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { - s.Prefix = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetVersionIdMarker sets the VersionIdMarker field's value. -func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { - s.VersionIdMarker = &v +// SetKey sets the Key field's value. +func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v return s } -// SetVersions sets the Versions field's value. -func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { - s.Versions = v +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest -type ListObjectsInput struct { +// The container element for Object Lock configuration parameters. +type ObjectLockConfiguration struct { _ struct{} `type:"structure"` - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates whether this bucket has an Object Lock configuration enabled. + ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + // The Object Lock rule in place for the specified object. + Rule *ObjectLockRule `type:"structure"` +} - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` +// String returns the string representation +func (s ObjectLockConfiguration) String() string { + return awsutil.Prettify(s) +} - // Specifies the key to start with when listing objects in a bucket. - Marker *string `location:"querystring" locationName:"marker" type:"string"` +// GoString returns the string representation +func (s ObjectLockConfiguration) GoString() string { + return s.String() +} - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` +// SetObjectLockEnabled sets the ObjectLockEnabled field's value. 
+func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { + s.ObjectLockEnabled = &v + return s +} - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` +// SetRule sets the Rule field's value. +func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { + s.Rule = v + return s +} - // Confirms that the requester knows that she or he will be charged for the - // list objects request. Bucket owners need not specify this parameter in their - // requests. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +// A Legal Hold configuration for an object. +type ObjectLockLegalHold struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object has a Legal Hold in place. + Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` } // String returns the string representation -func (s ListObjectsInput) String() string { +func (s ObjectLockLegalHold) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectsInput) GoString() string { +func (s ObjectLockLegalHold) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } +// SetStatus sets the Status field's value. +func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { + s.Status = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// A Retention configuration for an object. +type ObjectLockRetention struct { + _ struct{} `type:"structure"` + + // Indicates the Retention mode for the specified object. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The date on which this Object Lock Retention will expire. + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` } -// SetBucket sets the Bucket field's value. -func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { - s.Bucket = &v - return s +// String returns the string representation +func (s ObjectLockRetention) String() string { + return awsutil.Prettify(s) } -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { - s.Delimiter = &v - return s +// GoString returns the string representation +func (s ObjectLockRetention) GoString() string { + return s.String() } -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { - s.EncodingType = &v +// SetMode sets the Mode field's value. +func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { + s.Mode = &v return s } -// SetMarker sets the Marker field's value. -func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { - s.Marker = &v +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { + s.RetainUntilDate = &v return s } -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { - s.MaxKeys = &v - return s +// The container element for an Object Lock rule. 
+type ObjectLockRule struct { + _ struct{} `type:"structure"` + + // The default retention period that you want to apply to new objects placed + // in the specified bucket. + DefaultRetention *DefaultRetention `type:"structure"` } -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { - s.Prefix = &v - return s +// String returns the string representation +func (s ObjectLockRule) String() string { + return awsutil.Prettify(s) } -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { - s.RequestPayer = &v +// GoString returns the string representation +func (s ObjectLockRule) GoString() string { + return s.String() +} + +// SetDefaultRetention sets the DefaultRetention field's value. +func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { + s.DefaultRetention = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput -type ListObjectsOutput struct { +type ObjectVersion struct { _ struct{} `type:"structure"` - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - Contents []*Object `type:"list" flattened:"true"` - - Delimiter *string `type:"string"` + ETag *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. - IsTruncated *bool `type:"boolean"` + // The object key. + Key *string `min:"1" type:"string"` - Marker *string `type:"string"` + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp"` - MaxKeys *int64 `type:"integer"` + Owner *Owner `type:"structure"` - Name *string `type:"string"` + // Size in bytes of the object. + Size *int64 `type:"integer"` - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. Amazon S3 lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMaker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys. - NextMarker *string `type:"string"` + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` - Prefix *string `type:"string"` + // Version ID of an object. + VersionId *string `type:"string"` } // String returns the string representation -func (s ListObjectsOutput) String() string { +func (s ObjectVersion) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectsOutput) GoString() string { +func (s ObjectVersion) GoString() string { return s.String() } -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { - s.CommonPrefixes = v +// SetETag sets the ETag field's value. 
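
The Object Lock types above compose into a single bucket-level configuration: ObjectLockConfiguration holds an enabled flag plus an ObjectLockRule, which in turn holds a DefaultRetention. A minimal sketch of building one, assuming the DefaultRetention type (with Mode and Days fields) defined elsewhere in this file and the standard aws pointer helpers; the mode and day count are illustrative only.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	lock := &s3.ObjectLockConfiguration{
		ObjectLockEnabled: aws.String("Enabled"),
		Rule: &s3.ObjectLockRule{
			// Hypothetical default: 30 days of GOVERNANCE-mode retention
			// applied to new objects placed in the bucket.
			DefaultRetention: &s3.DefaultRetention{
				Mode: aws.String("GOVERNANCE"),
				Days: aws.Int64(30),
			},
		},
	}
	fmt.Println(lock) // String() pretty-prints via awsutil.Prettify
}
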
+func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v return s } -// SetContents sets the Contents field's value. -func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { - s.Contents = v +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v return s } -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { - s.Delimiter = &v +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v return s } -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { - s.EncodingType = &v +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v return s } -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { - s.IsTruncated = &v +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v return s } -// SetMarker sets the Marker field's value. -func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { - s.Marker = &v +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v return s } -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { - s.MaxKeys = &v +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v return s } -// SetName sets the Name field's value. -func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { - s.Name = &v +// SetVersionId sets the VersionId field's value. +func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v return s } -// SetNextMarker sets the NextMarker field's value. -func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { - s.NextMarker = &v - return s +// Describes the location where the restore job's output is stored. +type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` } -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { - s.Prefix = &v +// String returns the string representation +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request -type ListObjectsV2Input struct { +// Describes how results of the Select job are serialized. 
+type OutputSerialization struct { _ struct{} `type:"structure"` - // Name of the bucket to list. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + // Specifies JSON as the request's output serialization format. + JSON *JSONOutput `type:"structure"` +} - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` +// String returns the string representation +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` +// GoString returns the string representation +func (s OutputSerialization) GoString() string { + return s.String() +} - // The owner field is not present in listV2 by default, if you want to return - // owner field with each key in the result then set the fetch owner field to - // true - FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` +// SetJSON sets the JSON field's value. +func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` +type Owner struct { + _ struct{} `type:"structure"` - // Confirms that the requester knows that she or he will be charged for the - // list objects request in V2 style. Bucket owners need not specify this parameter - // in their requests. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + DisplayName *string `type:"string"` - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket - StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` + ID *string `type:"string"` } // String returns the string representation -func (s ListObjectsV2Input) String() string { +func (s Owner) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectsV2Input) GoString() string { +func (s Owner) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListObjectsV2Input) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { - s.Bucket = &v +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v return s } -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { - s.ContinuationToken = &v - return s +type ParquetInput struct { + _ struct{} `type:"structure"` } -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { - s.Delimiter = &v - return s +// String returns the string representation +func (s ParquetInput) String() string { + return awsutil.Prettify(s) } -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { - s.EncodingType = &v - return s +// GoString returns the string representation +func (s ParquetInput) GoString() string { + return s.String() } -// SetFetchOwner sets the FetchOwner field's value. -func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { - s.FetchOwner = &v - return s +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size in bytes of the uploaded part data. + Size *int64 `type:"integer"` } -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { - s.MaxKeys = &v +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v return s } -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { - s.Prefix = &v +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v return s } -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { - s.RequestPayer = &v +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v return s } -// SetStartAfter sets the StartAfter field's value. -func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { - s.StartAfter = &v +// SetSize sets the Size field's value. +func (s *Part) SetSize(v int64) *Part { + s.Size = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output -type ListObjectsV2Output struct { +// The container element for a bucket's policy status. 
+type PolicyStatus struct { _ struct{} `type:"structure"` - // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by delimiter - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - // Metadata about each object returned. - Contents []*Object `type:"list" flattened:"true"` - - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key - ContinuationToken *string `type:"string"` - - // A delimiter is a character you use to group keys. - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. + IsPublic *bool `locationName:"IsPublic" type:"boolean"` +} - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. - IsTruncated *bool `type:"boolean"` +// String returns the string representation +func (s PolicyStatus) String() string { + return awsutil.Prettify(s) +} - // KeyCount is the number of keys returned with this request. KeyCount will - // always be less than equals to MaxKeys field. Say you ask for 50 keys, your - // result will include less than equals 50 keys - KeyCount *int64 `type:"integer"` +// GoString returns the string representation +func (s PolicyStatus) GoString() string { + return s.String() +} - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `type:"integer"` +// SetIsPublic sets the IsPublic field's value. +func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { + s.IsPublic = &v + return s +} - // Name of the bucket to list. - Name *string `type:"string"` +type Progress struct { + _ struct{} `type:"structure"` - // NextContinuationToken is sent when isTruncated is true which means there - // are more keys in the bucket that can be listed. The next list requests to - // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken - // is obfuscated and is not a real key - NextContinuationToken *string `type:"string"` + // The current number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` - // Limits the response to keys that begin with the specified prefix. - Prefix *string `type:"string"` + // The current number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket - StartAfter *string `type:"string"` + // The current number of object bytes scanned. + BytesScanned *int64 `type:"long"` } // String returns the string representation -func (s ListObjectsV2Output) String() string { +func (s Progress) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectsV2Output) GoString() string { +func (s Progress) GoString() string { return s.String() } -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { - s.CommonPrefixes = v +// SetBytesProcessed sets the BytesProcessed field's value. 
+func (s *Progress) SetBytesProcessed(v int64) *Progress { + s.BytesProcessed = &v return s } -// SetContents sets the Contents field's value. -func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { - s.Contents = v +// SetBytesReturned sets the BytesReturned field's value. +func (s *Progress) SetBytesReturned(v int64) *Progress { + s.BytesReturned = &v return s } -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { - s.ContinuationToken = &v +// SetBytesScanned sets the BytesScanned field's value. +func (s *Progress) SetBytesScanned(v int64) *Progress { + s.BytesScanned = &v return s } -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { - s.Delimiter = &v - return s +type ProgressEvent struct { + _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` + + // The Progress event details. + Details *Progress `locationName:"Details" type:"structure"` } -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { - s.EncodingType = &v - return s +// String returns the string representation +func (s ProgressEvent) String() string { + return awsutil.Prettify(s) } -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { - s.IsTruncated = &v - return s +// GoString returns the string representation +func (s ProgressEvent) GoString() string { + return s.String() } -// SetKeyCount sets the KeyCount field's value. -func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { - s.KeyCount = &v +// SetDetails sets the Details field's value. +func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent { + s.Details = v return s } -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { - s.MaxKeys = &v - return s +// The ProgressEvent is an event in the SelectObjectContentEventStream group of events. +func (s *ProgressEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *ProgressEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil } -// SetName sets the Name field's value. -func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { - s.Name = &v - return s +type PublicAccessBlockConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 should block public access control lists (ACLs) + // for this bucket and objects in this bucket. Setting this element to TRUE + // causes the following behavior: + // + // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is + // public. + // + // * PUT Object calls fail if the request includes a public ACL. + // + // Enabling this setting doesn't affect existing policies or ACLs. + BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should block public bucket policies for this + // bucket. 
Setting this element to TRUE causes Amazon S3 to reject calls to + // PUT Bucket policy if the specified bucket policy allows public access. + // + // Enabling this setting doesn't affect existing bucket policies. + BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` + + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to + // ignore all public ACLs on this bucket and objects in this bucket. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs + // and doesn't prevent new public ACLs from being set. + IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should restrict public bucket policies for this + // bucket. Setting this element to TRUE restricts access to this bucket to only + // AWS services and authorized users within this account if the bucket has a + // public policy. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. + RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` } -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { - s.NextContinuationToken = &v - return s +// String returns the string representation +func (s PublicAccessBlockConfiguration) String() string { + return awsutil.Prettify(s) } -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { - s.Prefix = &v - return s +// GoString returns the string representation +func (s PublicAccessBlockConfiguration) GoString() string { + return s.String() } -// SetStartAfter sets the StartAfter field's value. -func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { - s.StartAfter = &v +// SetBlockPublicAcls sets the BlockPublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicAcls = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest -type ListPartsInput struct { - _ struct{} `type:"structure"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicPolicy = &v + return s +} - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` +// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { + s.IgnorePublicAcls = &v + return s +} - // Sets the maximum number of parts to return. - MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` +// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. +func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { + s.RestrictPublicBuckets = &v + return s +} - // Specifies the part after which listing should begin. 
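
Taken together, the four booleans of PublicAccessBlockConfiguration above describe a bucket's public-access posture. A minimal sketch of requesting the most restrictive posture, assuming the PutPublicAccessBlockInput type added elsewhere in this patch and the standard aws pointer helpers; the bucket name is hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	input := &s3.PutPublicAccessBlockInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			// Deny public ACLs and policies outright, ignore any that exist,
			// and restrict access under any remaining public policy.
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	}
	fmt.Println(input)
}
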
Only parts with higher - // part numbers will be listed. - PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure" payload:"AccelerateConfiguration"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Specifies the Accelerate Configuration you want to set for the bucket. + // + // AccelerateConfiguration is a required field + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Upload ID identifying the multipart upload whose parts are being listed. + // Name of the bucket for which the accelerate configuration is set. // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s ListPartsInput) String() string { +func (s PutBucketAccelerateConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListPartsInput) GoString() string { +func (s PutBucketAccelerateConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListPartsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} +func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -14137,225 +18535,214 @@ func (s *ListPartsInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { - s.Bucket = &v +// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. +func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { + s.AccelerateConfiguration = v return s } -// SetKey sets the Key field's value. -func (s *ListPartsInput) SetKey(v string) *ListPartsInput { - s.Key = &v +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { + s.Bucket = &v return s } -// SetMaxParts sets the MaxParts field's value. -func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { - s.MaxParts = &v - return s +func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetPartNumberMarker sets the PartNumberMarker field's value. -func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { - s.PartNumberMarker = &v - return s +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` } -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { - s.RequestPayer = &v - return s +// String returns the string representation +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) } -// SetUploadId sets the UploadId field's value. -func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { - s.UploadId = &v - return s +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput -type ListPartsOutput struct { - _ struct{} `type:"structure"` - - // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` - - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. - AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` - - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` - - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` - - // Indicates whether the returned list of parts is truncated. - IsTruncated *bool `type:"boolean"` - - // Object key for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` +type PutBucketAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` - // Maximum number of parts that were allowed in the response. - MaxParts *int64 `type:"integer"` + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. - NextPartNumberMarker *int64 `type:"integer"` + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - Owner *Owner `type:"structure"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Part number after which listing begins. - PartNumberMarker *int64 `type:"integer"` + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + // Allows grantee to list the objects in the bucket. 
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - // Upload ID identifying the multipart upload whose parts are being listed. - UploadId *string `type:"string"` + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` } // String returns the string representation -func (s ListPartsOutput) String() string { +func (s PutBucketAclInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListPartsOutput) GoString() string { +func (s PutBucketAclInput) GoString() string { return s.String() } -// SetAbortDate sets the AbortDate field's value. -func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { - s.AbortDate = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } -// SetAbortRuleId sets the AbortRuleId field's value. -func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { - s.AbortRuleId = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetBucket sets the Bucket field's value. -func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { - s.Bucket = &v +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v return s } -// SetInitiator sets the Initiator field's value. -func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { - s.Initiator = v +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v return s } -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { - s.IsTruncated = &v +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v return s } -// SetKey sets the Key field's value. -func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { - s.Key = &v - return s +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetMaxParts sets the MaxParts field's value. 
-func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { - s.MaxParts = &v +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v return s } -// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. -func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { - s.NextPartNumberMarker = &v +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v return s } -// SetOwner sets the Owner field's value. -func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { - s.Owner = v +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v return s } -// SetPartNumberMarker sets the PartNumberMarker field's value. -func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { - s.PartNumberMarker = &v +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v return s } -// SetParts sets the Parts field's value. -func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { - s.Parts = v +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v return s } -// SetRequestCharged sets the RequestCharged field's value. -func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { - s.RequestCharged = &v - return s +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` } -// SetStorageClass sets the StorageClass field's value. -func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { - s.StorageClass = &v - return s +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) } -// SetUploadId sets the UploadId field's value. -func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { - s.UploadId = &v - return s +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled -type LoggingEnabled struct { - _ struct{} `type:"structure"` +type PutBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` - // Specifies the bucket where you want Amazon S3 to store server access logs. - // You can have your logs delivered to any bucket that you own, including the - // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case you should - // choose a different TargetPrefix for each source bucket so that the delivered - // log files can be distinguished by key. - TargetBucket *string `type:"string"` + // The configuration and any analyses for the analytics filter. + // + // AnalyticsConfiguration is a required field + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + // The name of the bucket to which an analytics configuration is stored. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // This element lets you specify a prefix for the keys that the log files will - // be stored under. - TargetPrefix *string `type:"string"` + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } // String returns the string representation -func (s LoggingEnabled) String() string { +func (s PutBucketAnalyticsConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LoggingEnabled) GoString() string { +func (s PutBucketAnalyticsConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *LoggingEnabled) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} - if s.TargetGrants != nil { - for i, v := range s.TargetGrants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) - } +func (s *PutBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} + if s.AnalyticsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.AnalyticsConfiguration != nil { + if err := s.AnalyticsConfiguration.Validate(); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) } } @@ -14365,111 +18752,80 @@ func (s *LoggingEnabled) Validate() error { return nil } -// SetTargetBucket sets the TargetBucket field's value. -func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { - s.TargetBucket = &v +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { + s.AnalyticsConfiguration = v return s } -// SetTargetGrants sets the TargetGrants field's value. -func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { - s.TargetGrants = v +// SetBucket sets the Bucket field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { + s.Bucket = &v return s } -// SetTargetPrefix sets the TargetPrefix field's value. -func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { - s.TargetPrefix = &v +func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { + s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator -type MetricsAndOperator struct { +type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` - - // The prefix used when evaluating an AND predicate. 
- Prefix *string `type:"string"` - - // The list of tags used when evaluating an AND predicate. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } // String returns the string representation -func (s MetricsAndOperator) String() string { +func (s PutBucketAnalyticsConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MetricsAndOperator) GoString() string { +func (s PutBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *MetricsAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { - s.Tags = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration -type MetricsConfiguration struct { - _ struct{} `type:"structure"` +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` - // Specifies a metrics configuration filter. The metrics configuration will - // only include objects that meet the filter's criteria. A filter must be a - // prefix, a tag, or a conjunction (MetricsAndOperator). - Filter *MetricsFilter `type:"structure"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` + // CORSConfiguration is a required field + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s MetricsConfiguration) String() string { +func (s PutBucketCorsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MetricsConfiguration) GoString() string { +func (s PutBucketCorsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *MetricsConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) } } @@ -14479,55 +18835,80 @@ func (s *MetricsConfiguration) Validate() error { return nil } -// SetFilter sets the Filter field's value. -func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { - s.Filter = v +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v return s } -// SetId sets the Id field's value. -func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { - s.Id = &v +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. +func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter -type MetricsFilter struct { +type PutBucketCorsOutput struct { _ struct{} `type:"structure"` +} - // A conjunction (logical AND) of predicates, which is used in evaluating a - // metrics filter. The operator must have at least two predicates, and an object - // must match all of the predicates in order for the filter to apply. - And *MetricsAndOperator `type:"structure"` +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} - // The prefix used when evaluating a metrics filter. - Prefix *string `type:"string"` +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} - // The tag used when evaluating a metrics filter. - Tag *Tag `type:"structure"` +type PutBucketEncryptionInput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // The name of the bucket for which the server-side encryption configuration + // is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for server-side encryption configuration rules. Currently S3 supports + // one rule only. 
+ // + // ServerSideEncryptionConfiguration is a required field + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s MetricsFilter) String() string { +func (s PutBucketEncryptionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MetricsFilter) GoString() string { +func (s PutBucketEncryptionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *MetricsFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } +func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) } } @@ -14537,219 +18918,238 @@ func (s *MetricsFilter) Validate() error { return nil } -// SetAnd sets the And field's value. -func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { - s.And = v +// SetBucket sets the Bucket field's value. +func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { + s.Bucket = &v return s } -// SetPrefix sets the Prefix field's value. -func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { - s.Prefix = &v - return s +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetTag sets the Tag field's value. -func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { - s.Tag = v +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload -type MultipartUpload struct { +type PutBucketEncryptionOutput struct { _ struct{} `type:"structure"` +} - // Date and time at which the multipart upload was initiated. - Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` +// String returns the string representation +func (s PutBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` +// GoString returns the string representation +func (s PutBucketEncryptionOutput) GoString() string { + return s.String() +} - // Key of the object for which the multipart upload was initiated. 
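PutBucketEncryptionInput is new in this revision, with ServerSideEncryptionConfiguration as its required XML payload. A sketch of building it through the chained setters generated above; the bucket name and the single AES256 default-encryption rule are assumed example values, and the nested rule types come from elsewhere in this same file:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// exampleEncryptionInput shows the fluent style the generated setters
// allow: each Set* returns its receiver.
func exampleEncryptionInput() *s3.PutBucketEncryptionInput {
	return (&s3.PutBucketEncryptionInput{}).
		SetBucket("example-bucket").
		SetServerSideEncryptionConfiguration(&s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				// S3 currently accepts one rule; AES256 requests SSE-S3.
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String("AES256"),
				},
			}},
		})
}

func main() {
	fmt.Println(exampleEncryptionInput().Validate()) // <nil> when well-formed
}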
- Key *string `min:"1" type:"string"` +type PutBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` - Owner *Owner `type:"structure"` + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - // Upload ID that identifies the multipart upload. - UploadId *string `type:"string"` + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s MultipartUpload) String() string { +func (s PutBucketInventoryConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MultipartUpload) GoString() string { +func (s PutBucketInventoryConfigurationInput) GoString() string { return s.String() } -// SetInitiated sets the Initiated field's value. -func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { - s.Initiated = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } -// SetInitiator sets the Initiator field's value. -func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { - s.Initiator = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetKey sets the Key field's value. -func (s *MultipartUpload) SetKey(v string) *MultipartUpload { - s.Key = &v +// SetBucket sets the Bucket field's value. +func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v return s } -// SetOwner sets the Owner field's value. -func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { - s.Owner = v - return s +func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetStorageClass sets the StorageClass field's value. -func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { - s.StorageClass = &v +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v return s } -// SetUploadId sets the UploadId field's value. 
-func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { - s.UploadId = &v +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v return s } -// Specifies when noncurrent object versions expire. Upon expiration, Amazon -// S3 permanently deletes the noncurrent object versions. You set this lifecycle -// configuration action on a bucket that has versioning enabled (or suspended) -// to request that Amazon S3 delete noncurrent object versions at a specific -// period in the object's lifetime. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration -type NoncurrentVersionExpiration struct { +type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) - NoncurrentDays *int64 `type:"integer"` } // String returns the string representation -func (s NoncurrentVersionExpiration) String() string { +func (s PutBucketInventoryConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NoncurrentVersionExpiration) GoString() string { +func (s PutBucketInventoryConfigurationOutput) GoString() string { return s.String() } -// SetNoncurrentDays sets the NoncurrentDays field's value. -func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { - s.NoncurrentDays = &v - return s -} - -// Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA or GLACIER storage class. If your bucket is -// versioning-enabled (or versioning is suspended), you can set this action -// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA -// or GLACIER storage class at a specific period in the object's lifetime. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition -type NoncurrentVersionTransition struct { - _ struct{} `type:"structure"` +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) - NoncurrentDays *int64 `type:"integer"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The class of storage used to store the object. 
- StorageClass *string `type:"string" enum:"TransitionStorageClass"` + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s NoncurrentVersionTransition) String() string { +func (s PutBucketLifecycleConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NoncurrentVersionTransition) GoString() string { +func (s PutBucketLifecycleConfigurationInput) GoString() string { return s.String() } -// SetNoncurrentDays sets the NoncurrentDays field's value. -func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { - s.NoncurrentDays = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v return s } -// SetStorageClass sets the StorageClass field's value. -func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { - s.StorageClass = &v +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v return s } -// Container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off on the bucket. 
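Unlike most of the new Put* inputs, PutBucketLifecycleConfigurationInput leaves LifecycleConfiguration optional in Validate, though a supplied configuration is still validated recursively. A sketch under assumed values (the bucket name, rule ID, prefix, and 30-day expiry are illustrative, and the rule types are defined elsewhere in this file):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	input := &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-old-logs"),
				Status:     aws.String("Enabled"), // required on every rule
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	}
	fmt.Println(input.Validate()) // <nil> when the nested rule is valid
}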
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration -type NotificationConfiguration struct { - _ struct{} `type:"structure"` - - LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` - QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s NotificationConfiguration) String() string { +func (s PutBucketLifecycleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NotificationConfiguration) GoString() string { +func (s PutBucketLifecycleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *NotificationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} - if s.LambdaFunctionConfigurations != nil { - for i, v := range s.LambdaFunctionConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) - } - } +func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.QueueConfigurations != nil { - for i, v := range s.QueueConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) - } - } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.TopicConfigurations != nil { - for i, v := range s.TopicConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) - } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) } } @@ -14759,184 +19159,250 @@ func (s *NotificationConfiguration) Validate() error { return nil } -// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. 
-func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { - s.LambdaFunctionConfigurations = v +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { + s.Bucket = &v return s } -// SetQueueConfigurations sets the QueueConfigurations field's value. -func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { - s.QueueConfigurations = v - return s +func (s *PutBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetTopicConfigurations sets the TopicConfigurations field's value. -func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { - s.TopicConfigurations = v +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { + s.LifecycleConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated -type NotificationConfigurationDeprecated struct { +type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` +} - CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} - QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} - TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +type PutBucketLoggingInput struct { + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // BucketLoggingStatus is a required field + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s NotificationConfigurationDeprecated) String() string { +func (s PutBucketLoggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NotificationConfigurationDeprecated) GoString() string { +func (s PutBucketLoggingInput) GoString() string { return s.String() } -// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. -func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { - s.CloudFunctionConfiguration = v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetQueueConfiguration sets the QueueConfiguration field's value. -func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { - s.QueueConfiguration = v +// SetBucket sets the Bucket field's value. +func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v return s } -// SetTopicConfiguration sets the TopicConfiguration field's value. -func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { - s.TopicConfiguration = v +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v return s } -// Container for object key name filtering rules. For information about key -// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter -type NotificationConfigurationFilter struct { +type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` - - // Container for object key name prefix and suffix filtering rules. - Key *KeyFilter `locationName:"S3Key" type:"structure"` } // String returns the string representation -func (s NotificationConfigurationFilter) String() string { +func (s PutBucketLoggingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NotificationConfigurationFilter) GoString() string { +func (s PutBucketLoggingOutput) GoString() string { return s.String() } -// SetKey sets the Key field's value. -func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { - s.Key = v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object -type Object struct { - _ struct{} `type:"structure"` - - ETag *string `type:"string"` - - Key *string `min:"1" type:"string"` - - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +type PutBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` - Owner *Owner `type:"structure"` + // The name of the bucket for which the metrics configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - Size *int64 `type:"integer"` + // The ID used to identify the metrics configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectStorageClass"` + // Specifies the metrics configuration. + // + // MetricsConfiguration is a required field + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s Object) String() string { +func (s PutBucketMetricsConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Object) GoString() string { +func (s PutBucketMetricsConfigurationInput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *Object) SetETag(v string) *Object { - s.ETag = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.MetricsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) + } + if s.MetricsConfiguration != nil { + if err := s.MetricsConfiguration.Validate(); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) + } + } -// SetKey sets the Key field's value. -func (s *Object) SetKey(v string) *Object { - s.Key = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLastModified sets the LastModified field's value. -func (s *Object) SetLastModified(v time.Time) *Object { - s.LastModified = &v +// SetBucket sets the Bucket field's value. +func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { + s.Bucket = &v return s } -// SetOwner sets the Owner field's value. -func (s *Object) SetOwner(v *Owner) *Object { - s.Owner = v - return s +func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetSize sets the Size field's value. -func (s *Object) SetSize(v int64) *Object { - s.Size = &v +// SetId sets the Id field's value. +func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { + s.Id = &v return s } -// SetStorageClass sets the StorageClass field's value. -func (s *Object) SetStorageClass(v string) *Object { - s.StorageClass = &v +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { + s.MetricsConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier -type ObjectIdentifier struct { +type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` +} - // Key name of the object to delete. 
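For PutBucketMetricsConfigurationInput, note that Id travels in the querystring while MetricsConfiguration is the XML payload, and the payload repeats the identifier. A sketch with assumed values; the "EntireBucket" id is just a conventional example, not taken from this patch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	input := &s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("EntireBucket"), // querystring identifier
		MetricsConfiguration: &s3.MetricsConfiguration{
			Id: aws.String("EntireBucket"), // payload carries the same id
		},
	}
	fmt.Println(input.Validate()) // <nil>: all three required fields set
}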
- // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` +// String returns the string representation +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} - // VersionId for the specific version of the object to delete. - VersionId *string `type:"string"` +// GoString returns the string representation +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A container for specifying the notification configuration of the bucket. + // If this element is empty, notifications are turned off for the bucket. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s ObjectIdentifier) String() string { +func (s PutBucketNotificationConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ObjectIdentifier) GoString() string { +func (s PutBucketNotificationConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ObjectIdentifier) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) +func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -14945,221 +19411,235 @@ func (s *ObjectIdentifier) Validate() error { return nil } -// SetKey sets the Key field's value. -func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { - s.Key = &v +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { - s.VersionId = &v +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. 
+func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion -type ObjectVersion struct { +type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` - - ETag *string `type:"string"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `min:"1" type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Owner *Owner `type:"structure"` - - // Size in bytes of the object. - Size *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` - - // Version ID of an object. - VersionId *string `type:"string"` } // String returns the string representation -func (s ObjectVersion) String() string { +func (s PutBucketNotificationConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ObjectVersion) GoString() string { +func (s PutBucketNotificationConfigurationOutput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *ObjectVersion) SetETag(v string) *ObjectVersion { - s.ETag = &v - return s -} +type PutBucketNotificationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` -// SetIsLatest sets the IsLatest field's value. -func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { - s.IsLatest = &v - return s + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// SetKey sets the Key field's value. -func (s *ObjectVersion) SetKey(v string) *ObjectVersion { - s.Key = &v - return s +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) } -// SetLastModified sets the LastModified field's value. -func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { - s.LastModified = &v - return s +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() } -// SetOwner sets the Owner field's value. -func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { - s.Owner = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSize sets the Size field's value. 
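The non-deprecated PutBucketNotificationConfigurationInput requires a NotificationConfiguration whose nested topic, queue, and Lambda entries are each validated, per the hunks above. A sketch wiring a single SNS topic; the bucket name, topic ARN, and event name are placeholder values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	input := &s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"),
		NotificationConfiguration: &s3.NotificationConfiguration{
			TopicConfigurations: []*s3.TopicConfiguration{{
				// Both Events and TopicArn are required on each entry.
				Events:   []*string{aws.String("s3:ObjectCreated:*")},
				TopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:example-topic"),
			}},
		},
	}
	fmt.Println(input.Validate()) // <nil> when the nested entries are valid
}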
-func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { - s.Size = &v +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v return s } -// SetStorageClass sets the StorageClass field's value. -func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { - s.StorageClass = &v - return s +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetVersionId sets the VersionId field's value. -func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { - s.VersionId = &v +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner -type Owner struct { +type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` - - DisplayName *string `type:"string"` - - ID *string `type:"string"` } // String returns the string representation -func (s Owner) String() string { +func (s PutBucketNotificationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Owner) GoString() string { +func (s PutBucketNotificationOutput) GoString() string { return s.String() } -// SetDisplayName sets the DisplayName field's value. -func (s *Owner) SetDisplayName(v string) *Owner { - s.DisplayName = &v - return s -} - -// SetID sets the ID field's value. -func (s *Owner) SetID(v string) *Owner { - s.ID = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part -type Part struct { - _ struct{} `type:"structure"` - - // Entity tag returned when the part was uploaded. - ETag *string `type:"string"` +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` - // Date and time at which the part was uploaded. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Part number identifying the part. This is a positive integer between 1 and - // 10,000. - PartNumber *int64 `type:"integer"` + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` - // Size of the uploaded part data. - Size *int64 `type:"integer"` + // The bucket policy as a JSON document. + // + // Policy is a required field + Policy *string `type:"string" required:"true"` } // String returns the string representation -func (s Part) String() string { +func (s PutBucketPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Part) GoString() string { +func (s PutBucketPolicyInput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *Part) SetETag(v string) *Part { - s.ETag = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLastModified sets the LastModified field's value. -func (s *Part) SetLastModified(v time.Time) *Part { - s.LastModified = &v +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v return s } -// SetPartNumber sets the PartNumber field's value. -func (s *Part) SetPartNumber(v int64) *Part { - s.PartNumber = &v +func (s *PutBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v return s } -// SetSize sets the Size field's value. -func (s *Part) SetSize(v int64) *Part { - s.Size = &v +// SetPolicy sets the Policy field's value. +func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest -type PutBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure" payload:"AccelerateConfiguration"` +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} - // Specifies the Accelerate Configuration you want to set for the bucket. - // - // AccelerateConfiguration is a required field - AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` - // Name of the bucket for which the accelerate configuration is set. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + // + // ReplicationConfiguration is a required field + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketAccelerateConfigurationInput) String() string { +func (s PutBucketReplicationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketAccelerateConfigurationInput) GoString() string { +func (s PutBucketReplicationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
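PutBucketPolicyInput takes the policy as a raw JSON string payload rather than a modeled structure, and this revision adds the x-amz-confirm-remove-self-bucket-access header alongside it. A sketch with an assumed bucket name and a deliberately empty statement list standing in for a real policy document:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder document; a real policy would carry Statement entries.
	policy := `{"Version": "2012-10-17", "Statement": []}`

	input := (&s3.PutBucketPolicyInput{}).
		SetBucket("example-bucket").
		SetConfirmRemoveSelfBucketAccess(false). // not confirming self-lockout
		SetPolicy(policy)
	fmt.Println(input.Validate()) // <nil>: Bucket and Policy are both set
}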
-func (s *PutBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} - if s.AccelerateConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) - } +func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -15167,81 +19647,74 @@ func (s *PutBucketAccelerateConfigurationInput) Validate() error { return nil } -// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. -func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { - s.AccelerateConfiguration = v +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v return s } -// SetBucket sets the Bucket field's value. -func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { - s.Bucket = &v +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput -type PutBucketAccelerateConfigurationOutput struct { +type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketAccelerateConfigurationOutput) String() string { +func (s PutBucketReplicationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketAccelerateConfigurationOutput) GoString() string { +func (s PutBucketReplicationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest -type PutBucketAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` - - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. 
- GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // RequestPaymentConfiguration is a required field + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketAclInput) String() string { +func (s PutBucketRequestPaymentInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketAclInput) GoString() string { +func (s PutBucketRequestPaymentInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.AccessControlPolicy != nil { - if err := s.AccessControlPolicy.Validate(); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) } } @@ -15251,114 +19724,74 @@ func (s *PutBucketAclInput) Validate() error { return nil } -// SetACL sets the ACL field's value. -func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { - s.ACL = &v - return s -} - -// SetAccessControlPolicy sets the AccessControlPolicy field's value. -func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { - s.AccessControlPolicy = v - return s -} - // SetBucket sets the Bucket field's value. -func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { +func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { s.Bucket = &v return s } -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. 
-func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWrite sets the GrantWrite field's value. -func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { - s.GrantWrite = &v - return s +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { - s.GrantWriteACP = &v +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. +func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput -type PutBucketAclOutput struct { +type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketAclOutput) String() string { +func (s PutBucketRequestPaymentOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketAclOutput) GoString() string { +func (s PutBucketRequestPaymentOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest -type PutBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` - - // The configuration and any analyses for the analytics filter. - // - // AnalyticsConfiguration is a required field - AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"` +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` - // The name of the bucket to which an analytics configuration is stored. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketAnalyticsConfigurationInput) String() string { +func (s PutBucketTaggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketAnalyticsConfigurationInput) GoString() string { +func (s PutBucketTaggingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} - if s.AnalyticsConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) - } +func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.AnalyticsConfiguration != nil { - if err := s.AnalyticsConfiguration.Validate(); err != nil { - invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) } } @@ -15368,73 +19801,74 @@ func (s *PutBucketAnalyticsConfigurationInput) Validate() error { return nil } -// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { - s.AnalyticsConfiguration = v - return s -} - // SetBucket sets the Bucket field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { s.Bucket = &v return s } -// SetId sets the Id field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { - s.Id = &v +func (s *PutBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetTagging sets the Tagging field's value. +func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput -type PutBucketAnalyticsConfigurationOutput struct { +type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketAnalyticsConfigurationOutput) String() string { +func (s PutBucketTaggingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketAnalyticsConfigurationOutput) GoString() string { +func (s PutBucketTaggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest -type PutBucketCorsInput struct { - _ struct{} `type:"structure" payload:"CORSConfiguration"` +type PutBucketVersioningInput struct { + _ struct{} `type:"structure" payload:"VersioningConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // CORSConfiguration is a required field - CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. 
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // VersioningConfiguration is a required field + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketCorsInput) String() string { +func (s PutBucketVersioningInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketCorsInput) GoString() string { +func (s PutBucketVersioningInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} +func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.CORSConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.CORSConfiguration != nil { - if err := s.CORSConfiguration.Validate(); err != nil { - invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) - } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) } if invalidParams.Len() > 0 { @@ -15444,77 +19878,79 @@ func (s *PutBucketCorsInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { s.Bucket = &v return s } -// SetCORSConfiguration sets the CORSConfiguration field's value. -func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { - s.CORSConfiguration = v +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetMFA sets the MFA field's value. +func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput -type PutBucketCorsOutput struct { +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s +} + +type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketCorsOutput) String() string { +func (s PutBucketVersioningOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketCorsOutput) GoString() string { +func (s PutBucketVersioningOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest -type PutBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` - // The name of the bucket where the inventory configuration will be stored. 
- // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Specifies the inventory configuration. - // - // InventoryConfiguration is a required field - InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"` + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketInventoryConfigurationInput) String() string { +func (s PutBucketWebsiteInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketInventoryConfigurationInput) GoString() string { +func (s PutBucketWebsiteInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.InventoryConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) } - if s.InventoryConfiguration != nil { - if err := s.InventoryConfiguration.Validate(); err != nil { - invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) } } @@ -15525,67 +19961,106 @@ func (s *PutBucketInventoryConfigurationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { +func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { s.Bucket = &v return s } -// SetId sets the Id field's value. -func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { - s.Id = &v - return s +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetInventoryConfiguration sets the InventoryConfiguration field's value. -func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { - s.InventoryConfiguration = v +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. 
+func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput -type PutBucketInventoryConfigurationOutput struct { +type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketInventoryConfigurationOutput) String() string { +func (s PutBucketWebsiteOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketInventoryConfigurationOutput) GoString() string { +func (s PutBucketWebsiteOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest -type PutBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s PutBucketLifecycleConfigurationInput) String() string { +func (s PutObjectAclInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketLifecycleConfigurationInput) GoString() string { +func (s PutObjectAclInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLifecycleConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} +func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) } } @@ -15595,63 +20070,241 @@ func (s *PutBucketLifecycleConfigurationInput) Validate() error { return nil } +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + // SetBucket sets the Bucket field's value. -func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { +func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { s.Bucket = &v return s } -// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. -func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { - s.LifecycleConfiguration = v +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput -type PutBucketLifecycleConfigurationOutput struct { +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. 
+func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +type PutObjectAclOutput struct { _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } // String returns the string representation -func (s PutBucketLifecycleConfigurationOutput) String() string { +func (s PutObjectAclOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketLifecycleConfigurationOutput) GoString() string { +func (s PutObjectAclOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest -type PutBucketLifecycleInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // Name of the bucket to which the PUT operation was initiated. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the PUT operation was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // The Legal Hold status that you want to apply to the specified object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want this object's Object Lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4.
Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } // String returns the string representation -func (s PutBucketLifecycleInput) String() string { +func (s PutObjectInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketLifecycleInput) GoString() string { +func (s PutObjectInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} +func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) - } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -15660,290 +20313,245 @@ func (s *PutBucketLifecycleInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { - s.Bucket = &v +// SetACL sets the ACL field's value. +func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v return s } -// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. -func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { - s.LifecycleConfiguration = v +// SetBody sets the Body field's value. 
+func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput -type PutBucketLifecycleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleOutput) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest -type PutBucketLoggingInput struct { - _ struct{} `type:"structure" payload:"BucketLoggingStatus"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // BucketLoggingStatus is a required field - BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PutBucketLoggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLoggingInput) GoString() string { - return s.String() +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLoggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} +func (s *PutObjectInput) getBucket() (v string) { if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.BucketLoggingStatus == nil { - invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) - } - if s.BucketLoggingStatus != nil { - if err := s.BucketLoggingStatus.Validate(); err != nil { - invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams + return v } - return nil + return *s.Bucket } -// SetBucket sets the Bucket field's value. -func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { - s.Bucket = &v +// SetCacheControl sets the CacheControl field's value. +func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v return s } -// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. -func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { - s.BucketLoggingStatus = v +// SetContentDisposition sets the ContentDisposition field's value. +func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput -type PutBucketLoggingOutput struct { - _ struct{} `type:"structure"` +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s } -// String returns the string representation -func (s PutBucketLoggingOutput) String() string { - return awsutil.Prettify(s) +// SetContentLanguage sets the ContentLanguage field's value. 
+func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s } -// GoString returns the string representation -func (s PutBucketLoggingOutput) GoString() string { - return s.String() +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest -type PutBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure" payload:"MetricsConfiguration"` - - // The name of the bucket for which the metrics configuration is set. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Specifies the metrics configuration. - // - // MetricsConfiguration is a required field - MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"` +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s } -// String returns the string representation -func (s PutBucketMetricsConfigurationInput) String() string { - return awsutil.Prettify(s) +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s } -// GoString returns the string representation -func (s PutBucketMetricsConfigurationInput) GoString() string { - return s.String() +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.MetricsConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) - } - if s.MetricsConfiguration != nil { - if err := s.MetricsConfiguration.Validate(); err != nil { - invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { - s.Bucket = &v +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v return s } -// SetId sets the Id field's value. -func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { - s.Id = &v +// SetGrantReadACP sets the GrantReadACP field's value. 
+func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v return s } -// SetMetricsConfiguration sets the MetricsConfiguration field's value. -func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { - s.MetricsConfiguration = v +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput -type PutBucketMetricsConfigurationOutput struct { - _ struct{} `type:"structure"` +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s } -// String returns the string representation -func (s PutBucketMetricsConfigurationOutput) String() string { - return awsutil.Prettify(s) +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s } -// GoString returns the string representation -func (s PutBucketMetricsConfigurationOutput) GoString() string { - return s.String() +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest -type PutBucketNotificationConfigurationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { + s.ObjectLockMode = &v + return s +} - // Container for specifying the notification configuration of the bucket. If - // this element is empty, notifications are turned off on the bucket. - // - // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { + s.ObjectLockRetainUntilDate = &v + return s } -// String returns the string representation -func (s PutBucketNotificationConfigurationInput) String() string { - return awsutil.Prettify(s) +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s } -// GoString returns the string representation -func (s PutBucketNotificationConfigurationInput) GoString() string { - return s.String() +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketNotificationConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) - } - if s.NotificationConfiguration != nil { - if err := s.NotificationConfiguration.Validate(); err != nil { - invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) - } - } +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v } - return nil + return *s.SSECustomerKey } -// SetBucket sets the Bucket field's value. -func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { - s.Bucket = &v +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v return s } -// SetNotificationConfiguration sets the NotificationConfiguration field's value. -func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { - s.NotificationConfiguration = v +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput -type PutBucketNotificationConfigurationOutput struct { - _ struct{} `type:"structure"` +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s } -// String returns the string representation -func (s PutBucketNotificationConfigurationOutput) String() string { - return awsutil.Prettify(s) +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s } -// GoString returns the string representation -func (s PutBucketNotificationConfigurationOutput) GoString() string { - return s.String() +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest -type PutBucketNotificationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +type PutObjectLegalHoldInput struct { + _ struct{} `type:"structure" payload:"LegalHold"` + // The bucket containing the object that you want to place a Legal Hold on. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` + // The key name for the object that you want to place a Legal Hold on. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Container element for the Legal Hold configuration you want to apply to the + // specified object. + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object that you want to place a Legal Hold on. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s PutBucketNotificationInput) String() string { +func (s PutObjectLegalHoldInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketNotificationInput) GoString() string { +func (s PutObjectLegalHoldInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} +func (s *PutObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -15953,63 +20561,104 @@ func (s *PutBucketNotificationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { +func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { s.Bucket = &v return s } -// SetNotificationConfiguration sets the NotificationConfiguration field's value. -func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { - s.NotificationConfiguration = v +func (s *PutObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. 
+func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { + s.Key = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput -type PutBucketNotificationOutput struct { +// SetLegalHold sets the LegalHold field's value. +func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { + s.LegalHold = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { + s.VersionId = &v + return s +} + +type PutObjectLegalHoldOutput struct { _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } // String returns the string representation -func (s PutBucketNotificationOutput) String() string { +func (s PutObjectLegalHoldOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketNotificationOutput) GoString() string { +func (s PutObjectLegalHoldOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest -type PutBucketPolicyInput struct { - _ struct{} `type:"structure" payload:"Policy"` +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { + s.RequestCharged = &v + return s +} +type PutObjectLockConfigurationInput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + + // The bucket whose Object Lock configuration you want to create or replace. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The bucket policy as a JSON document. - // - // Policy is a required field - Policy *string `type:"string" required:"true"` + // The Object Lock configuration that you want to apply to the specified bucket. + ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } // String returns the string representation -func (s PutBucketPolicyInput) String() string { +func (s PutObjectLockConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketPolicyInput) GoString() string { +func (s PutObjectLockConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} +func (s *PutObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -16019,138 +20668,210 @@ func (s *PutBucketPolicyInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { +func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { s.Bucket = &v return s } -// SetPolicy sets the Policy field's value. -func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { - s.Policy = &v +func (s *PutObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { + s.ObjectLockConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput -type PutBucketPolicyOutput struct { +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { + s.RequestPayer = &v + return s +} + +// SetToken sets the Token field's value. +func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { + s.Token = &v + return s +} + +type PutObjectLockConfigurationOutput struct { _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } // String returns the string representation -func (s PutBucketPolicyOutput) String() string { +func (s PutObjectLockConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketPolicyOutput) GoString() string { +func (s PutObjectLockConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest -type PutBucketReplicationInput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { + s.RequestCharged = &v + return s +} - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +type PutObjectOutput struct { + _ struct{} `type:"structure"` - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. - // - // ReplicationConfiguration is a required field - ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` + // Entity tag for the uploaded object. 
+ ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } // String returns the string representation -func (s PutBucketReplicationInput) String() string { +func (s PutObjectOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketReplicationInput) GoString() string { +func (s PutObjectOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.ReplicationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) - } - if s.ReplicationConfiguration != nil { - if err := s.ReplicationConfiguration.Validate(); err != nil { - invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) - } - } +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { - s.Bucket = &v +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v return s } -// SetReplicationConfiguration sets the ReplicationConfiguration field's value. 
-func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput {
-	s.ReplicationConfiguration = v
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput {
+	s.SSECustomerAlgorithm = &v
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput
-type PutBucketReplicationOutput struct {
-	_ struct{} `type:"structure"`
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput {
+	s.SSECustomerKeyMD5 = &v
+	return s
 }
 
-// String returns the string representation
-func (s PutBucketReplicationOutput) String() string {
-	return awsutil.Prettify(s)
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
+	s.SSEKMSKeyId = &v
+	return s
 }
 
-// GoString returns the string representation
-func (s PutBucketReplicationOutput) GoString() string {
-	return s.String()
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
+	s.ServerSideEncryption = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
+	s.VersionId = &v
+	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest
-type PutBucketRequestPaymentInput struct {
-	_ struct{} `type:"structure" payload:"RequestPaymentConfiguration"`
+type PutObjectRetentionInput struct {
+	_ struct{} `type:"structure" payload:"Retention"`
 
+	// The bucket that contains the object you want to apply this Object Retention
+	// configuration to.
+	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// RequestPaymentConfiguration is a required field
-	RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"`
+	// Indicates whether this operation should bypass Governance-mode restrictions.
+	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+	// The key name for the object that you want to apply this Object Retention
+	// configuration to.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// The container element for the Object Retention configuration.
+	Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// The version ID for the object that you want to apply this Object Retention
+	// configuration to.
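+	//
+	// A hedged sketch of applying a retention period with this input; "svc"
+	// is an assumed *s3.S3 client, and the names and the 30-day window are
+	// illustrative only.
+	//
+	//	_, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
+	//		Bucket: aws.String("example-bucket"),
+	//		Key:    aws.String("example-key"),
+	//		Retention: &s3.ObjectLockRetention{
+	//			Mode:            aws.String(s3.ObjectLockRetentionModeGovernance),
+	//			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
+	//		},
+	//	})
+	//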
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s PutBucketRequestPaymentInput) String() string { +func (s PutObjectRetentionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketRequestPaymentInput) GoString() string { +func (s PutObjectRetentionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketRequestPaymentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} +func (s *PutObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.RequestPaymentConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.RequestPaymentConfiguration != nil { - if err := s.RequestPaymentConfiguration.Validate(); err != nil { - invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) - } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -16160,59 +20881,112 @@ func (s *PutBucketRequestPaymentInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { +func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { s.Bucket = &v return s } -// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. -func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { - s.RequestPaymentConfiguration = v +func (s *PutObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { + s.BypassGovernanceRetention = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput -type PutBucketRequestPaymentOutput struct { +// SetKey sets the Key field's value. +func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetRetention sets the Retention field's value. +func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { + s.Retention = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { + s.VersionId = &v + return s +} + +type PutObjectRetentionOutput struct { _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. 
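+	//
+	// A sketch, assuming "out" is the output of a successful call: comparing
+	// against the RequestCharged enum value is the usual way to detect a
+	// requester-pays charge.
+	//
+	//	if aws.StringValue(out.RequestCharged) == s3.RequestChargedRequester {
+	//		// the requester, not the bucket owner, was billed for this call
+	//	}
+	//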
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } // String returns the string representation -func (s PutBucketRequestPaymentOutput) String() string { +func (s PutObjectRetentionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketRequestPaymentOutput) GoString() string { +func (s PutObjectRetentionOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest -type PutBucketTaggingInput struct { +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { + s.RequestCharged = &v + return s +} + +type PutObjectTaggingInput struct { _ struct{} `type:"structure" payload:"Tagging"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s PutBucketTaggingInput) String() string { +func (s PutObjectTaggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketTaggingInput) GoString() string { +func (s PutObjectTaggingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} +func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } if s.Tagging == nil { invalidParams.Add(request.NewErrParamRequired("Tagging")) } @@ -16229,65 +21003,98 @@ func (s *PutBucketTaggingInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { s.Bucket = &v return s } +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s +} + // SetTagging sets the Tagging field's value. 
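+//
+// The generated setters return the receiver, so an input can be assembled
+// fluently; a sketch with illustrative names ("env"/"dev" are placeholders):
+//
+//	input := (&s3.PutObjectTaggingInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("example-key").
+//		SetTagging(&s3.Tagging{TagSet: []*s3.Tag{
+//			{Key: aws.String("env"), Value: aws.String("dev")},
+//		}})
+//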
-func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { +func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { s.Tagging = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput -type PutBucketTaggingOutput struct { +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { + s.VersionId = &v + return s +} + +type PutObjectTaggingOutput struct { _ struct{} `type:"structure"` + + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } // String returns the string representation -func (s PutBucketTaggingOutput) String() string { +func (s PutObjectTaggingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketTaggingOutput) GoString() string { +func (s PutObjectTaggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest -type PutBucketVersioningInput struct { - _ struct{} `type:"structure" payload:"VersioningConfiguration"` +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { + s.VersionId = &v + return s +} +type PutPublicAccessBlockInput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to set. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // VersioningConfiguration is a required field - VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon Simple Storage Service Developer Guide. + // + // PublicAccessBlockConfiguration is a required field + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketVersioningInput) String() string { +func (s PutPublicAccessBlockInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketVersioningInput) GoString() string { +func (s PutPublicAccessBlockInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketVersioningInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"}
+func (s *PutPublicAccessBlockInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.VersioningConfiguration == nil {
-		invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration"))
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.PublicAccessBlockConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration"))
 	}
 
 	if invalidParams.Len() > 0 {
@@ -16297,72 +21104,81 @@ func (s *PutBucketVersioningInput) Validate() error {
 }
 
 // SetBucket sets the Bucket field's value.
-func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput {
+func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput {
 	s.Bucket = &v
 	return s
 }
 
-// SetMFA sets the MFA field's value.
-func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput {
-	s.MFA = &v
-	return s
+func (s *PutPublicAccessBlockInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
 }
 
-// SetVersioningConfiguration sets the VersioningConfiguration field's value.
-func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput {
-	s.VersioningConfiguration = v
+// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value.
+func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput {
+	s.PublicAccessBlockConfiguration = v
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput
-type PutBucketVersioningOutput struct {
+type PutPublicAccessBlockOutput struct {
 	_ struct{} `type:"structure"`
 }
 
 // String returns the string representation
-func (s PutBucketVersioningOutput) String() string {
+func (s PutPublicAccessBlockOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutBucketVersioningOutput) GoString() string {
+func (s PutPublicAccessBlockOutput) GoString() string {
 	return s.String()
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest
-type PutBucketWebsiteInput struct {
-	_ struct{} `type:"structure" payload:"WebsiteConfiguration"`
+// A container for specifying the configuration for publication of messages
+// to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects
+// specified events.
+type QueueConfiguration struct {
+	_ struct{} `type:"structure"`
 
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	// Events is a required field
+	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
 
-	// WebsiteConfiguration is a required field
-	WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"`
+	// A container for object key name filtering rules. For information about key
+	// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+	// in the Amazon Simple Storage Service Developer Guide.
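+	//
+	// An illustrative sketch of wiring this configuration into a bucket
+	// notification; "svc" is an assumed client, and the ARN, bucket name, and
+	// suffix rule are placeholders.
+	//
+	//	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+	//		Bucket: aws.String("example-bucket"),
+	//		NotificationConfiguration: &s3.NotificationConfiguration{
+	//			QueueConfigurations: []*s3.QueueConfiguration{{
+	//				Events:   []*string{aws.String("s3:ObjectCreated:*")},
+	//				QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
+	//				Filter: &s3.NotificationConfigurationFilter{
+	//					Key: &s3.KeyFilter{FilterRules: []*s3.FilterRule{
+	//						{Name: aws.String("suffix"), Value: aws.String(".jpg")},
+	//					}},
+	//				},
+	//			}},
+	//		},
+	//	})
+	//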
+ Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // will publish a message when it detects events of the specified type. + // + // QueueArn is a required field + QueueArn *string `locationName:"Queue" type:"string" required:"true"` } // String returns the string representation -func (s PutBucketWebsiteInput) String() string { +func (s QueueConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketWebsiteInput) GoString() string { +func (s QueueConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.WebsiteConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) +func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) } - if s.WebsiteConfiguration != nil { - if err := s.WebsiteConfiguration.Validate(); err != nil { - invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) - } + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) } if invalidParams.Len() > 0 { @@ -16371,322 +21187,391 @@ func (s *PutBucketWebsiteInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { - s.Bucket = &v +// SetEvents sets the Events field's value. +func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { + s.Id = &v + return s +} + +// SetQueueArn sets the QueueArn field's value. +func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { + s.QueueArn = &v + return s +} + +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + Queue *string `type:"string"` +} + +// String returns the string representation +func (s QueueConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. 
+func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
+	s.Event = &v
+	return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
+	s.Events = v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+	s.Id = &v
 	return s
 }
 
-// SetWebsiteConfiguration sets the WebsiteConfiguration field's value.
-func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput {
-	s.WebsiteConfiguration = v
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+	s.Queue = &v
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput
-type PutBucketWebsiteOutput struct {
-	_ struct{} `type:"structure"`
+type RecordsEvent struct {
+	_ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
+
+	// The byte array of partial, one or more result records.
+	//
+	// Payload is automatically base64 encoded/decoded by the SDK.
+	Payload []byte `type:"blob"`
 }
 
 // String returns the string representation
-func (s PutBucketWebsiteOutput) String() string {
+func (s RecordsEvent) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutBucketWebsiteOutput) GoString() string {
+func (s RecordsEvent) GoString() string {
 	return s.String()
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest
-type PutObjectAclInput struct {
-	_ struct{} `type:"structure" payload:"AccessControlPolicy"`
-
-	// The canned ACL to apply to the object.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
-
-	AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
-
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+// SetPayload sets the Payload field's value.
+func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent {
+	s.Payload = v
+	return s
+}
 
-	// Allows grantee the read, write, read ACP, and write ACP permissions on the
-	// bucket.
-	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+// The RecordsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *RecordsEvent) eventSelectObjectContentEventStream() {}
 
-	// Allows grantee to list the objects in the bucket.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	s.Payload = make([]byte, len(msg.Payload))
+	copy(s.Payload, msg.Payload)
+	return nil
+}
 
-	// Allows grantee to read the bucket ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+type Redirect struct {
+	_ struct{} `type:"structure"`
 
-	// Allows grantee to create, overwrite, and delete any object in the bucket.
-	GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+	// The host name to use in the redirect request.
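+	//
+	// A sketch of a Redirect used inside a website routing rule; all names
+	// and values are illustrative.
+	//
+	//	rule := &s3.RoutingRule{
+	//		Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
+	//		Redirect: &s3.Redirect{
+	//			HostName:             aws.String("example.com"),
+	//			ReplaceKeyPrefixWith: aws.String("documents/"),
+	//		},
+	//	}
+	//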
+	HostName *string `type:"string"`
 
-	// Allows grantee to write the ACL for the applicable bucket.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+	// The HTTP redirect code to use on the response. Not required if one of the
+	// siblings is present.
+	HttpRedirectCode *string `type:"string"`
 
-	// Key is a required field
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
 
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+	// The object key prefix to use in the redirect request. For example, to redirect
+	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
+	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
+	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+	// if one of the siblings is present. Can be present only if ReplaceKeyWith
+	// is not provided.
+	ReplaceKeyPrefixWith *string `type:"string"`
 
-	// VersionId used to reference a specific version of the object.
-	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+	// The specific object key to use in the redirect request. For example, redirect
+	// requests to error.html. Not required if one of the siblings is present. Can
+	// be present only if ReplaceKeyPrefixWith is not provided.
+	ReplaceKeyWith *string `type:"string"`
 }
 
 // String returns the string representation
-func (s PutObjectAclInput) String() string {
+func (s Redirect) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutObjectAclInput) GoString() string {
+func (s Redirect) GoString() string {
 	return s.String()
 }
 
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutObjectAclInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-	if s.AccessControlPolicy != nil {
-		if err := s.AccessControlPolicy.Validate(); err != nil {
-			invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetACL sets the ACL field's value.
-func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput {
-	s.ACL = &v
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+	s.HostName = &v
 	return s
 }
 
-// SetAccessControlPolicy sets the AccessControlPolicy field's value.
-func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput {
-	s.AccessControlPolicy = v
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { + s.HttpRedirectCode = &v return s } -// SetBucket sets the Bucket field's value. -func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { - s.Bucket = &v +// SetProtocol sets the Protocol field's value. +func (s *Redirect) SetProtocol(v string) *Redirect { + s.Protocol = &v return s } -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { - s.GrantFullControl = &v +// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. +func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { + s.ReplaceKeyPrefixWith = &v return s } -// SetGrantRead sets the GrantRead field's value. -func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { - s.GrantRead = &v +// SetReplaceKeyWith sets the ReplaceKeyWith field's value. +func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { + s.ReplaceKeyWith = &v return s } -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { - s.GrantReadACP = &v - return s +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests will be redirected. + // + // HostName is a required field + HostName *string `type:"string" required:"true"` + + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` } -// SetGrantWrite sets the GrantWrite field's value. -func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { - s.GrantWrite = &v - return s +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) } -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { - s.GrantWriteACP = &v - return s +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() } -// SetKey sets the Key field's value. -func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { - s.Key = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { - s.RequestPayer = &v +// SetHostName sets the HostName field's value. +func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { + s.HostName = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { - s.VersionId = &v +// SetProtocol sets the Protocol field's value. +func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { + s.Protocol = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput -type PutObjectAclOutput struct { +// A container for replication rules. You can add up to 1,000 rules. The maximum +// size of a replication configuration is 2 MB. 
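+//
+// A minimal assembly sketch; the role ARN and destination bucket ARN are
+// placeholders, and a real configuration may need further rule fields.
+//
+//	cfg := &s3.ReplicationConfiguration{
+//		Role: aws.String("arn:aws:iam::123456789012:role/example-replication-role"),
+//		Rules: []*s3.ReplicationRule{{
+//			Status:      aws.String(s3.ReplicationRuleStatusEnabled),
+//			Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::example-destination-bucket")},
+//		}},
+//	}
+//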
+type ReplicationConfiguration struct { _ struct{} `type:"structure"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that Amazon S3 can assume when replicating the objects. + // + // Role is a required field + Role *string `type:"string" required:"true"` + + // A container for one or more replication rules. A replication configuration + // must have at least one rule and can contain a maximum of 1,000 rules. + // + // Rules is a required field + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } // String returns the string representation -func (s PutObjectAclOutput) String() string { +func (s ReplicationConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectAclOutput) GoString() string { +func (s ReplicationConfiguration) GoString() string { return s.String() } -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { - s.RequestCharged = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest -type PutObjectInput struct { - _ struct{} `type:"structure" payload:"Body"` - - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // Object data. - Body io.ReadSeeker `type:"blob"` - - // Name of the bucket to which the PUT operation was initiated. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` +// Validate inspects the fields of the type to determine if they are valid. 
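+//
+// A sketch of the client-side check this enables, reusing the "cfg" value
+// assembled in the example above; no network call is made.
+//
+//	if err := cfg.Validate(); err != nil {
+//		// err enumerates every missing required field, e.g. Role or Rules
+//	}
+//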
+func (s *ReplicationConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
+	if s.Role == nil {
+		invalidParams.Add(request.NewErrParamRequired("Role"))
+	}
+	if s.Rules == nil {
+		invalidParams.Add(request.NewErrParamRequired("Rules"))
+	}
+	if s.Rules != nil {
+		for i, v := range s.Rules {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
 
-	// The date and time at which the object is no longer cacheable.
-	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
 
-	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
-	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+// SetRole sets the Role field's value.
+func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration {
+	s.Role = &v
+	return s
+}
 
-	// Allows grantee to read the object data and its metadata.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+// SetRules sets the Rules field's value.
+func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
+	s.Rules = v
+	return s
+}
 
-	// Allows grantee to read the object ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+// A container for information about a specific replication rule.
+type ReplicationRule struct {
+	_ struct{} `type:"structure"`
 
-	// Allows grantee to write the ACL for the applicable object.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+	// Specifies whether Amazon S3 should replicate delete markers.
+	DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"`
 
-	// Object key for which the PUT operation was initiated.
+	// A container for information about the replication destination.
 	//
-	// Key is a required field
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// A map of metadata to store with the object in S3.
-	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+	// Destination is a required field
+	Destination *Destination `type:"structure" required:"true"`
 
-	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
-	// requests for an object protected by AWS KMS will fail if not made via SSL
-	// or using SigV4. Documentation on configuring any of the officially supported
-	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+	// A filter that identifies the subset of objects to which the replication rule
+	// applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+	Filter *ReplicationRuleFilter `type:"structure"`
 
-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+	// A unique identifier for the rule. The maximum value is 255 characters.
+	ID *string `type:"string"`
 
-	// The type of storage to use for the object. Defaults to 'STANDARD'.
-	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+	// An object keyname prefix that identifies the object or objects to which the
+	// rule applies. The maximum prefix length is 1,024 characters.
+	//
+	// Deprecated: Prefix has been deprecated
+	Prefix *string `deprecated:"true" type:"string"`
 
-	// The tag-set for the object. The tag-set must be encoded as URL Query parameters
-	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+	// The priority associated with the rule. If you specify multiple rules in a
+	// replication configuration, Amazon S3 prioritizes the rules to prevent conflicts
+	// when filtering. If two or more rules identify the same object based on a
+	// specified filter, the rule with higher priority takes precedence. For example:
+	//
+	// * The same object qualifies under prefix-based filter criteria because
+	// prefixes specified in multiple rules overlap
+	//
+	// * The same object qualifies under tag-based filter criteria specified
+	// in multiple rules
+	//
+	// For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+	// in the Amazon S3 Developer Guide.
+	Priority *int64 `type:"integer"`
+
+	// A container that describes additional filters for identifying the source
+	// objects that you want to replicate. You can choose to enable or disable the
+	// replication of these objects. Currently, Amazon S3 supports only the filter
+	// that you can specify for objects created with server-side encryption using
+	// an AWS KMS-Managed Key (SSE-KMS).
+	//
+	// If you want Amazon S3 to replicate objects created with server-side encryption
+	// using AWS KMS-Managed Keys, you must specify this element.
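+	//
+	// Sketch of enabling SSE-KMS source selection; the field and constant
+	// names are from this SDK, the usage itself is illustrative.
+	//
+	//	criteria := &s3.SourceSelectionCriteria{
+	//		SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{
+	//			Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled),
+	//		},
+	//	}
+	//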
+ SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + // If status isn't enabled, the rule is ignored. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` } // String returns the string representation -func (s PutObjectInput) String() string { +func (s ReplicationRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectInput) GoString() string { +func (s ReplicationRule) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -16695,254 +21580,242 @@ func (s *PutObjectInput) Validate() error { return nil } -// SetACL sets the ACL field's value. -func (s *PutObjectInput) SetACL(v string) *PutObjectInput { - s.ACL = &v +// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. +func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { + s.DeleteMarkerReplication = v return s } -// SetBody sets the Body field's value. -func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { - s.Body = v +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v return s } -// SetBucket sets the Bucket field's value. -func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { - s.Bucket = &v +// SetFilter sets the Filter field's value. +func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { + s.Filter = v return s } -// SetCacheControl sets the CacheControl field's value. -func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { - s.CacheControl = &v +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v return s } -// SetContentDisposition sets the ContentDisposition field's value. 
-func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { - s.ContentDisposition = &v +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v return s } -// SetContentEncoding sets the ContentEncoding field's value. -func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { - s.ContentEncoding = &v +// SetPriority sets the Priority field's value. +func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { + s.Priority = &v return s } -// SetContentLanguage sets the ContentLanguage field's value. -func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { - s.ContentLanguage = &v +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v return s } -// SetContentLength sets the ContentLength field's value. -func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { - s.ContentLength = &v +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v return s } -// SetContentType sets the ContentType field's value. -func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { - s.ContentType = &v - return s -} +type ReplicationRuleAndOperator struct { + _ struct{} `type:"structure"` -// SetExpires sets the Expires field's value. -func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { - s.Expires = &v - return s -} + Prefix *string `type:"string"` -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { - s.GrantFullControl = &v - return s + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } -// SetGrantRead sets the GrantRead field's value. -func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { - s.GrantRead = &v - return s +// String returns the string representation +func (s ReplicationRuleAndOperator) String() string { + return awsutil.Prettify(s) } -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { - s.GrantReadACP = &v - return s +// GoString returns the string representation +func (s ReplicationRuleAndOperator) GoString() string { + return s.String() } -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { - s.GrantWriteACP = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } -// SetKey sets the Key field's value. -func (s *PutObjectInput) SetKey(v string) *PutObjectInput { - s.Key = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetMetadata sets the Metadata field's value. -func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { - s.Metadata = v +// SetPrefix sets the Prefix field's value. 
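+//
+// An And operator is only needed when a rule filter combines a prefix with
+// tags, or multiple tags; a sketch with placeholder values:
+//
+//	filter := &s3.ReplicationRuleFilter{
+//		And: &s3.ReplicationRuleAndOperator{
+//			Prefix: aws.String("logs/"),
+//			Tags:   []*s3.Tag{{Key: aws.String("team"), Value: aws.String("infra")}},
+//		},
+//	}
+//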
+func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { + s.Prefix = &v return s } -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { - s.RequestPayer = &v +// SetTags sets the Tags field's value. +func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { + s.Tags = v return s } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { - s.SSECustomerAlgorithm = &v - return s -} +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +type ReplicationRuleFilter struct { + _ struct{} `type:"structure"` -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { - s.SSECustomerKey = &v - return s + // A container for specifying rule filters. The filters determine the subset + // of objects to which the rule applies. This element is required only if you + // specify more than one filter. For example: + // + // * If you specify both a Prefix and a Tag filter, wrap these filters in + // an And tag. + // + // * If you specify a filter based on multiple tags, wrap the Tag elements + // in an And tag. + And *ReplicationRuleAndOperator `type:"structure"` + + // An object keyname prefix that identifies the subset of objects to which the + // rule applies. + Prefix *string `type:"string"` + + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. + Tag *Tag `type:"structure"` } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { - s.SSECustomerKeyMD5 = &v - return s +// String returns the string representation +func (s ReplicationRuleFilter) String() string { + return awsutil.Prettify(s) } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { - s.SSEKMSKeyId = &v - return s +// GoString returns the string representation +func (s ReplicationRuleFilter) GoString() string { + return s.String() } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { - s.ServerSideEncryption = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetStorageClass sets the StorageClass field's value. -func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { - s.StorageClass = &v +// SetAnd sets the And field's value. +func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { + s.And = v return s } -// SetTagging sets the Tagging field's value. 
-func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { - s.Tagging = &v +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { + s.Prefix = &v return s } -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { - s.WebsiteRedirectLocation = &v +// SetTag sets the Tag field's value. +func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { + s.Tag = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput -type PutObjectOutput struct { +type RequestPaymentConfiguration struct { _ struct{} `type:"structure"` - // Entity tag for the uploaded object. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // Specifies who pays for the download and request fees. + // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` } // String returns the string representation -func (s PutObjectOutput) String() string { +func (s RequestPaymentConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectOutput) GoString() string { +func (s RequestPaymentConfiguration) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { - s.ETag = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
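+//
+// An illustrative use of this configuration with PutBucketRequestPayment;
+// "svc" is an assumed client and the bucket name is a placeholder.
+//
+//	_, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
+//		Bucket: aws.String("example-bucket"),
+//		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//			Payer: aws.String(s3.PayerRequester),
+//		},
+//	})
+//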
+func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } -// SetExpiration sets the Expiration field's value. -func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { - s.Expiration = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { - s.RequestCharged = &v +// SetPayer sets the Payer field's value. +func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v return s } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} +type RequestProgress struct { + _ struct{} `type:"structure"` -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { - s.SSECustomerKeyMD5 = &v - return s + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool `type:"boolean"` } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { - s.SSEKMSKeyId = &v - return s +// String returns the string representation +func (s RequestProgress) String() string { + return awsutil.Prettify(s) } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { - s.ServerSideEncryption = &v - return s +// GoString returns the string representation +func (s RequestProgress) GoString() string { + return s.String() } -// SetVersionId sets the VersionId field's value. -func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { - s.VersionId = &v +// SetEnabled sets the Enabled field's value. +func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { + s.Enabled = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest -type PutObjectTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16950,40 +21823,46 @@ type PutObjectTaggingInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Container for restore job parameters. 
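+	//
+	// A hedged sketch of a basic archive restore with this input; "svc" is an
+	// assumed client, and the 2-day lifetime and Bulk tier are arbitrary choices.
+	//
+	//	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
+	//		Bucket: aws.String("example-bucket"),
+	//		Key:    aws.String("example-archived-key"),
+	//		RestoreRequest: &s3.RestoreRequest{
+	//			Days:                 aws.Int64(2),
+	//			GlacierJobParameters: &s3.GlacierJobParameters{Tier: aws.String(s3.TierBulk)},
+	//		},
+	//	})
+	//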
+ RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s PutObjectTaggingInput) String() string { +func (s RestoreObjectInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectTaggingInput) GoString() string { +func (s RestoreObjectInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } if s.Key != nil && len(*s.Key) < 1 { invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } - if s.Tagging == nil { - invalidParams.Add(request.NewErrParamRequired("Tagging")) - } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) } } @@ -16994,94 +21873,213 @@ func (s *PutObjectTaggingInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { +func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { s.Bucket = &v return s } +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. -func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { s.Key = &v return s } -// SetTagging sets the Tagging field's value. -func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { - s.Tagging = v +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v return s } // SetVersionId sets the VersionId field's value. -func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { +func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { s.VersionId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput -type PutObjectTaggingOutput struct { +type RestoreObjectOutput struct { _ struct{} `type:"structure"` - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. + RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` } // String returns the string representation -func (s PutObjectTaggingOutput) String() string { +func (s RestoreObjectOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectTaggingOutput) GoString() string { +func (s RestoreObjectOutput) GoString() string { return s.String() } -// SetVersionId sets the VersionId field's value. -func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { - s.VersionId = &v +// SetRequestCharged sets the RequestCharged field's value. +func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { + s.RequestCharged = &v return s } -// Container for specifying an configuration when you want Amazon S3 to publish -// events to an Amazon Simple Queue Service (Amazon SQS) queue. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration -type QueueConfiguration struct { +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. +type RestoreRequest struct { _ struct{} `type:"structure"` - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + Days *int64 `type:"integer"` + + // The optional description for the job. + Description *string `type:"string"` + + // Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. + Type *string `type:"string" enum:"RestoreRequestType"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
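+//
+// Illustrative only (not generated code; svc is an assumed *s3.S3 client): a
+// typical restore of an archived object for two days at the Standard Glacier
+// retrieval tier is built from these parameters as
+//
+//	_, err := svc.RestoreObject(&RestoreObjectInput{
+//		Bucket: aws.String("mybucket"),
+//		Key:    aws.String("archived/object"),
+//		RestoreRequest: &RestoreRequest{
+//			Days: aws.Int64(2),
+//			GlacierJobParameters: &GlacierJobParameters{
+//				Tier: aws.String(TierStandard),
+//			},
+//		},
+//	})
+//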
+func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. +func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. +func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - Filter *NotificationConfigurationFilter `type:"structure"` +type RoutingRule struct { + _ struct{} `type:"structure"` - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` - // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects - // events of specified type. + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. 
// - // QueueArn is a required field - QueueArn *string `locationName:"Queue" type:"string" required:"true"` + // Redirect is a required field + Redirect *Redirect `type:"structure" required:"true"` } // String returns the string representation -func (s QueueConfiguration) String() string { +func (s RoutingRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s QueueConfiguration) GoString() string { +func (s RoutingRule) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *QueueConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.QueueArn == nil { - invalidParams.Add(request.NewErrParamRequired("QueueArn")) +func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) } if invalidParams.Len() > 0 { @@ -17090,314 +22088,500 @@ func (s *QueueConfiguration) Validate() error { return nil } -// SetEvents sets the Events field's value. -func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { - s.Events = v +// SetCondition sets the Condition field's value. +func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v return s } -// SetFilter sets the Filter field's value. -func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { - s.Filter = v +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v return s } -// SetId sets the Id field's value. -func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { - s.Id = &v - return s -} +type Rule struct { + _ struct{} `type:"structure"` -// SetQueueArn sets the QueueArn field's value. -func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { - s.QueueArn = &v - return s -} + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated -type QueueConfigurationDeprecated struct { - _ struct{} `type:"structure"` + Expiration *LifecycleExpiration `type:"structure"` - // Bucket event for which to send notifications. - Event *string `deprecated:"true" type:"string" enum:"Event"` + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` - Events []*string `locationName:"Event" type:"list" flattened:"true"` + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. 
- Id *string `type:"string"` + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER + // storage class. If your bucket is versioning-enabled (or versioning is suspended), + // you can set this action to request that Amazon S3 transition noncurrent object + // versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER storage + // class at a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - Queue *string `type:"string"` + // Prefix identifying one or more objects to which the rule applies. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` } // String returns the string representation -func (s QueueConfigurationDeprecated) String() string { +func (s Rule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s QueueConfigurationDeprecated) GoString() string { +func (s Rule) GoString() string { return s.String() } -// SetEvent sets the Event field's value. -func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated { - s.Event = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v return s } -// SetEvents sets the Events field's value. -func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated { - s.Events = v +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v return s } -// SetId sets the Id field's value. -func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated { - s.Id = &v +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v return s } -// SetQueue sets the Queue field's value. -func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated { - s.Queue = &v +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect -type Redirect struct { - _ struct{} `type:"structure"` +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} - // The host name to use in the redirect request. 
- HostName *string `type:"string"` +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} - // The HTTP redirect code to use on the response. Not required if one of the - // siblings is present. - HttpRedirectCode *string `type:"string"` +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} - // The object key prefix to use in the redirect request. For example, to redirect - // requests for all pages with prefix docs/ (objects in the docs/ folder) to - // documents/, you can set a condition block with KeyPrefixEquals set to docs/ - // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required - // if one of the siblings is present. Can be present only if ReplaceKeyWith - // is not provided. - ReplaceKeyPrefixWith *string `type:"string"` +// Specifies the use of SSE-KMS to encrypt delivered Inventory reports. +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` - // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the sibling is present. Can - // be present only if ReplaceKeyPrefixWith is not provided. - ReplaceKeyWith *string `type:"string"` + // Specifies the ID of the AWS Key Management Service (KMS) master encryption + // key to use for encrypting Inventory reports. + // + // KeyId is a required field + KeyId *string `type:"string" required:"true" sensitive:"true"` } // String returns the string representation -func (s Redirect) String() string { +func (s SSEKMS) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Redirect) GoString() string { +func (s SSEKMS) GoString() string { return s.String() } -// SetHostName sets the HostName field's value. -func (s *Redirect) SetHostName(v string) *Redirect { - s.HostName = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *SSEKMS) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } -// SetHttpRedirectCode sets the HttpRedirectCode field's value. -func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { - s.HttpRedirectCode = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetProtocol sets the Protocol field's value. -func (s *Redirect) SetProtocol(v string) *Redirect { - s.Protocol = &v +// SetKeyId sets the KeyId field's value. +func (s *SSEKMS) SetKeyId(v string) *SSEKMS { + s.KeyId = &v return s } -// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. -func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { - s.ReplaceKeyPrefixWith = &v - return s +// Specifies the use of SSE-S3 to encrypt delivered Inventory reports. +type SSES3 struct { + _ struct{} `locationName:"SSE-S3" type:"structure"` } -// SetReplaceKeyWith sets the ReplaceKeyWith field's value. 
-func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { - s.ReplaceKeyWith = &v - return s +// String returns the string representation +func (s SSES3) String() string { + return awsutil.Prettify(s) } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo -type RedirectAllRequestsTo struct { - _ struct{} `type:"structure"` +// GoString returns the string representation +func (s SSES3) GoString() string { + return s.String() +} - // Name of the host where requests will be redirected. +// SelectObjectContentEventStream provides handling of EventStreams for +// the SelectObjectContent API. +// +// Use this type to receive SelectObjectContentEventStream events. The events +// can be read from the Events channel member. +// +// The events that can be received are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStream struct { + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. // - // HostName is a required field - HostName *string `type:"string" required:"true"` - - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` -} + // Must not be nil. + Reader SelectObjectContentEventStreamReader -// String returns the string representation -func (s RedirectAllRequestsTo) String() string { - return awsutil.Prettify(s) + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer } -// GoString returns the string representation -func (s RedirectAllRequestsTo) GoString() string { - return s.String() +// Close closes the EventStream. This will also cause the Events channel to be +// closed. You can use the closing of the Events channel to terminate your +// application's read from the API's EventStream. +// +// Will close the underlying EventStream reader. For EventStream over HTTP +// connection this will also close the HTTP connection. +// +// Close must be called when done using the EventStream API. Not calling Close +// may result in resource leaks. +func (es *SelectObjectContentEventStream) Close() (err error) { + es.Reader.Close() + return es.Err() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *RedirectAllRequestsTo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} - if s.HostName == nil { - invalidParams.Add(request.NewErrParamRequired("HostName")) +// Err returns any error that occurred while reading EventStream Events from +// the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.Reader.Err(); err != nil { + return err } + es.StreamCloser.Close() - if invalidParams.Len() > 0 { - return invalidParams - } return nil } -// SetHostName sets the HostName field's value. -func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { - s.HostName = &v - return s +// Events returns a channel to read EventStream Events from the +// SelectObjectContent API. 
+// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +// SelectObjectContentEventStreamEvent groups together all EventStream +// events read from the SelectObjectContent API. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamEvent interface { + eventSelectObjectContentEventStream() +} + +// SelectObjectContentEventStreamReader provides the interface for reading EventStream +// Events from the SelectObjectContent API. The +// default implementation for this interface will be SelectObjectContentEventStream. +// +// The reader's Close method must allow multiple concurrent calls. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamReader interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan SelectObjectContentEventStreamEvent + + // Close will close the underlying event stream reader. For event stream over + // HTTP this will also close the HTTP connection. + Close() error + + // Returns any error that has occurred while reading from the event stream. + Err() error } -// SetProtocol sets the Protocol field's value. -func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { - s.Protocol = &v - return s +type readSelectObjectContentEventStream struct { + eventReader *eventstreamapi.EventReader + stream chan SelectObjectContentEventStreamEvent + errVal atomic.Value + + done chan struct{} + closeOnce sync.Once } -// Container for replication rules. You can add as many as 1,000 rules. Total -// replication configuration size can be up to 2 MB. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration -type ReplicationConfiguration struct { - _ struct{} `type:"structure"` +func newReadSelectObjectContentEventStream( + reader io.ReadCloser, + unmarshalers request.HandlerList, + logger aws.Logger, + logLevel aws.LogLevelType, +) *readSelectObjectContentEventStream { + r := &readSelectObjectContentEventStream{ + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), + } - // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating - // the objects. - // - // Role is a required field - Role *string `type:"string" required:"true"` + r.eventReader = eventstreamapi.NewEventReader( + reader, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: unmarshalers, + }, + r.unmarshalerForEventType, + ) + r.eventReader.UseLogger(logger, logLevel) - // Container for information about a particular replication rule. Replication - // configuration must have at least one rule and can contain up to 1,000 rules. - // - // Rules is a required field - Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` + return r } -// String returns the string representation -func (s ReplicationConfiguration) String() string { - return awsutil.Prettify(s) -} +// Close will close the underlying event stream reader. For EventStream over +// HTTP this will also close the HTTP connection. 
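+//
+// In application code the stream is typically drained and then inspected for
+// errors before being closed. A consumer-loop sketch (illustrative, not
+// generated code; out is an assumed *SelectObjectContentOutput):
+//
+//	defer out.EventStream.Close()
+//	for event := range out.EventStream.Events() {
+//		switch e := event.(type) {
+//		case *RecordsEvent:
+//			os.Stdout.Write(e.Payload)
+//		case *StatsEvent:
+//			fmt.Printf("scanned %d bytes\n", aws.Int64Value(e.Details.BytesScanned))
+//		case *EndEvent:
+//			// the service finished sending events
+//		}
+//	}
+//	if err := out.EventStream.Err(); err != nil {
+//		// handle a stream read failure
+//	}
+//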
+func (r *readSelectObjectContentEventStream) Close() error { + r.closeOnce.Do(r.safeClose) -// GoString returns the string representation -func (s ReplicationConfiguration) GoString() string { - return s.String() + return r.Err() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) +func (r *readSelectObjectContentEventStream) safeClose() { + close(r.done) + err := r.eventReader.Close() + if err != nil { + r.errVal.Store(err) } - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) +} + +func (r *readSelectObjectContentEventStream) Err() error { + if v := r.errVal.Load(); v != nil { + return v.(error) } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue + + return nil +} + +func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return r.stream +} + +func (r *readSelectObjectContentEventStream) readEventStream() { + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + select { + case <-r.done: + // If closed already ignore the error + return + default: } + r.errVal.Store(err) + return } - } - if invalidParams.Len() > 0 { - return invalidParams + select { + case r.stream <- event.(SelectObjectContentEventStreamEvent): + case <-r.done: + return + } } - return nil } -// SetRole sets the Role field's value. -func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { - s.Role = &v - return s -} +func (r *readSelectObjectContentEventStream) unmarshalerForEventType( + eventType string, +) (eventstreamapi.Unmarshaler, error) { + switch eventType { + case "Cont": + return &ContinuationEvent{}, nil -// SetRules sets the Rules field's value. -func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { - s.Rules = v - return s + case "End": + return &EndEvent{}, nil + + case "Progress": + return &ProgressEvent{}, nil + + case "Records": + return &RecordsEvent{}, nil + + case "Stats": + return &StatsEvent{}, nil + default: + return nil, awserr.New( + request.ErrCodeSerialization, + fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType), + nil, + ) + } } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule -type ReplicationRule struct { - _ struct{} `type:"structure"` +// Request to filter the contents of an Amazon S3 object based on a simple Structured +// Query Language (SQL) statement. In the request, along with the SQL expression, +// you must specify a data serialization format (JSON or CSV) of the object. +// Amazon S3 uses this to parse object data into records. It returns only records +// that match the specified SQL expression. You must also specify the data serialization +// format for the response. For more information, see S3Select API Documentation +// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). 
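+//
+// A request sketch (illustrative, not generated code; assumes an *s3.S3
+// client named svc and a headerless CSV object): select the first column of
+// every record and stream the results back as CSV:
+//
+//	out, err := svc.SelectObjectContent(&SelectObjectContentInput{
+//		Bucket:         aws.String("mybucket"),
+//		Key:            aws.String("data.csv"),
+//		Expression:     aws.String("SELECT s._1 FROM S3Object s"),
+//		ExpressionType: aws.String(ExpressionTypeSql),
+//		InputSerialization: &InputSerialization{
+//			CSV: &CSVInput{FileHeaderInfo: aws.String(FileHeaderInfoNone)},
+//		},
+//		OutputSerialization: &OutputSerialization{
+//			CSV: &CSVOutput{},
+//		},
+//	})
+//
+// The returned SelectObjectContentOutput carries the event stream; see
+// SelectObjectContentEventStream above for how its events are consumed.
+//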
+type SelectObjectContentInput struct {
+	_ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 
-	// Destination is a required field
-	Destination *Destination `type:"structure" required:"true"`
+	// The S3 bucket.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// Unique identifier for the rule. The value cannot be longer than 255 characters.
-	ID *string `type:"string"`
+	// The expression that is used to query the object.
+	//
+	// Expression is a required field
+	Expression *string `type:"string" required:"true"`
 
-	// Object keyname prefix identifying one or more objects to which the rule applies.
-	// Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
-	// are not supported.
+	// The type of the provided expression (for example, SQL).
 	//
-	// Prefix is a required field
-	Prefix *string `type:"string" required:"true"`
+	// ExpressionType is a required field
+	ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`
 
-	// The rule is ignored if status is not Enabled.
+	// Describes the format of the data in the object that is being queried.
 	//
-	// Status is a required field
-	Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
+	// InputSerialization is a required field
+	InputSerialization *InputSerialization `type:"structure" required:"true"`
+
+	// The object key.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Describes the format of the data that you want Amazon S3 to return in response.
+	//
+	// OutputSerialization is a required field
+	OutputSerialization *OutputSerialization `type:"structure" required:"true"`
+
+	// Specifies if periodic request progress information should be enabled.
+	RequestProgress *RequestProgress `type:"structure"`
+
+	// The SSE Algorithm used to encrypt the object. For more information, see
+	// Server-Side Encryption (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// The SSE Customer Key. For more information, see Server-Side Encryption (Using
+	// Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// The SSE Customer Key MD5. For more information, see Server-Side Encryption
+	// (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
 }
 
 // String returns the string representation
-func (s ReplicationRule) String() string {
+func (s SelectObjectContentInput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s ReplicationRule) GoString() string {
+func (s SelectObjectContentInput) GoString() string {
 	return s.String()
 }
 
 // Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicationRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) +func (s *SelectObjectContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) } - if s.Destination != nil { - if err := s.Destination.Validate(); err != nil { - invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) - } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) } if invalidParams.Len() > 0 { @@ -17406,116 +22590,171 @@ func (s *ReplicationRule) Validate() error { return nil } -// SetDestination sets the Destination field's value. -func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { - s.Destination = v +// SetBucket sets the Bucket field's value. +func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput { + s.Bucket = &v return s } -// SetID sets the ID field's value. -func (s *ReplicationRule) SetID(v string) *ReplicationRule { - s.ID = &v +func (s *SelectObjectContentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpression sets the Expression field's value. +func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { + s.Expression = &v return s } -// SetPrefix sets the Prefix field's value. -func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { - s.Prefix = &v +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput { + s.ExpressionType = &v return s } -// SetStatus sets the Status field's value. -func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { - s.Status = &v +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { + s.InputSerialization = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration -type RequestPaymentConfiguration struct { - _ struct{} `type:"structure"` +// SetKey sets the Key field's value. +func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { + s.Key = &v + return s +} - // Specifies who pays for the download and request fees. - // - // Payer is a required field - Payer *string `type:"string" required:"true" enum:"Payer"` +// SetOutputSerialization sets the OutputSerialization field's value. 
+func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v + return s +} + +// SetRequestProgress sets the RequestProgress field's value. +func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s +} + +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v + return s +} + +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // Use EventStream to use the API's stream. + EventStream *SelectObjectContentEventStream `type:"structure"` } // String returns the string representation -func (s RequestPaymentConfiguration) String() string { +func (s SelectObjectContentOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RequestPaymentConfiguration) GoString() string { +func (s SelectObjectContentOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *RequestPaymentConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} - if s.Payer == nil { - invalidParams.Add(request.NewErrParamRequired("Payer")) - } +// SetEventStream sets the EventStream field's value. +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams +func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) { + if r.Error != nil { + return } - return nil -} + reader := newReadSelectObjectContentEventStream( + r.HTTPResponse.Body, + r.Handlers.UnmarshalStream, + r.Config.Logger, + r.Config.LogLevel.Value(), + ) + go reader.readEventStream() -// SetPayer sets the Payer field's value. -func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { - s.Payer = &v - return s + eventStream := &SelectObjectContentEventStream{ + StreamCloser: r.HTTPResponse.Body, + Reader: reader, + } + s.EventStream = eventStream } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest -type RestoreObjectInput struct { - _ struct{} `type:"structure" payload:"RestoreRequest"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// Describes the parameters for Select job types. +type SelectParameters struct { + _ struct{} `type:"structure"` - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The expression that is used to query the object. 
+ // + // Expression is a required field + Expression *string `type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // The type of the provided expression (e.g., SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` - RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` + // Describes how the results of the Select job are serialized. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` } // String returns the string representation -func (s RestoreObjectInput) String() string { +func (s SelectParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RestoreObjectInput) GoString() string { +func (s SelectParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RestoreObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) } - if s.RestoreRequest != nil { - if err := s.RestoreRequest.Validate(); err != nil { - invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) - } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) } if invalidParams.Len() > 0 { @@ -17524,93 +22763,117 @@ func (s *RestoreObjectInput) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { - s.Key = &v +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v return s } -// SetRequestPayer sets the RequestPayer field's value. -func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { - s.RequestPayer = &v +// SetExpressionType sets the ExpressionType field's value. 
+func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v return s } -// SetRestoreRequest sets the RestoreRequest field's value. -func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { - s.RestoreRequest = v +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v return s } -// SetVersionId sets the VersionId field's value. -func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { - s.VersionId = &v +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput -type RestoreObjectOutput struct { +// Describes the default server-side encryption to apply to new objects in the +// bucket. If Put Object request does not specify any server-side encryption, +// this default encryption will be applied. +type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // KMS master key ID to use for the default encryption. This parameter is allowed + // if SSEAlgorithm is aws:kms. + KMSMasterKeyID *string `type:"string" sensitive:"true"` + + // Server-side encryption algorithm to use for the default encryption. + // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` } // String returns the string representation -func (s RestoreObjectOutput) String() string { +func (s ServerSideEncryptionByDefault) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RestoreObjectOutput) GoString() string { +func (s ServerSideEncryptionByDefault) GoString() string { return s.String() } -// SetRequestCharged sets the RequestCharged field's value. -func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { - s.RequestCharged = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v + return s +} + +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest -type RestoreRequest struct { +// Container for server-side encryption configuration rules. Currently S3 supports +// one rule only. 
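+//
+// Construction sketch (illustrative, not generated code; svc is an assumed
+// *s3.S3 client and keyARN an assumed KMS key ARN): a single
+// default-encryption rule using aws:kms, sent with PutBucketEncryption:
+//
+//	_, err := svc.PutBucketEncryption(&PutBucketEncryptionInput{
+//		Bucket: aws.String("mybucket"),
+//		ServerSideEncryptionConfiguration: &ServerSideEncryptionConfiguration{
+//			Rules: []*ServerSideEncryptionRule{{
+//				ApplyServerSideEncryptionByDefault: &ServerSideEncryptionByDefault{
+//					SSEAlgorithm:   aws.String(ServerSideEncryptionAwsKms),
+//					KMSMasterKeyID: aws.String(keyARN),
+//				},
+//			}},
+//		},
+//	})
+//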
+type ServerSideEncryptionConfiguration struct { _ struct{} `type:"structure"` - // Lifetime of the active copy in days + // Container for information about a particular server-side encryption configuration + // rule. // - // Days is a required field - Days *int64 `type:"integer" required:"true"` - - // Glacier related prameters pertaining to this job. - GlacierJobParameters *GlacierJobParameters `type:"structure"` + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } // String returns the string representation -func (s RestoreRequest) String() string { +func (s ServerSideEncryptionConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RestoreRequest) GoString() string { +func (s ServerSideEncryptionConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RestoreRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} - if s.Days == nil { - invalidParams.Add(request.NewErrParamRequired("Days")) +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) } - if s.GlacierJobParameters != nil { - if err := s.GlacierJobParameters.Validate(); err != nil { - invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } } } @@ -17620,51 +22883,40 @@ func (s *RestoreRequest) Validate() error { return nil } -// SetDays sets the Days field's value. -func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { - s.Days = &v - return s -} - -// SetGlacierJobParameters sets the GlacierJobParameters field's value. -func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { - s.GlacierJobParameters = v +// SetRules sets the Rules field's value. +func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule -type RoutingRule struct { +// Container for information about a particular server-side encryption configuration +// rule. +type ServerSideEncryptionRule struct { _ struct{} `type:"structure"` - // A container for describing a condition that must be met for the specified - // redirect to apply. For example, 1. If request is for pages in the /docs folder, - // redirect to the /documents folder. 2. If request results in HTTP error 4xx, - // redirect request to another host where you might process the error. - Condition *Condition `type:"structure"` - - // Container for redirect information. You can redirect requests to another - // host, to another page, or with another protocol. In the event of an error, - // you can can specify a different error code to return. - // - // Redirect is a required field - Redirect *Redirect `type:"structure" required:"true"` + // Describes the default server-side encryption to apply to new objects in the + // bucket. 
If Put Object request does not specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` } // String returns the string representation -func (s RoutingRule) String() string { +func (s ServerSideEncryptionRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RoutingRule) GoString() string { +func (s ServerSideEncryptionRule) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RoutingRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} - if s.Redirect == nil { - invalidParams.Add(request.NewErrParamRequired("Redirect")) +func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -17673,75 +22925,78 @@ func (s *RoutingRule) Validate() error { return nil } -// SetCondition sets the Condition field's value. -func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { - s.Condition = v - return s -} - -// SetRedirect sets the Redirect field's value. -func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { - s.Redirect = v +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule -type Rule struct { +// A container for filters that define which source objects should be replicated. +type SourceSelectionCriteria struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // A container for filter information for the selection of S3 objects encrypted + // with AWS KMS. If you include SourceSelectionCriteria in the replication configuration, + // this element is required. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` +} - Expiration *LifecycleExpiration `type:"structure"` +// String returns the string representation +func (s SourceSelectionCriteria) String() string { + return awsutil.Prettify(s) +} - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` +// GoString returns the string representation +func (s SourceSelectionCriteria) GoString() string { + return s.String() +} - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` +// Validate inspects the fields of the type to determine if they are valid. 
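+//
+// Illustrative only (not generated code): to replicate just the objects that
+// are encrypted with AWS KMS, a replication rule would carry
+//
+//	criteria := &SourceSelectionCriteria{
+//		SseKmsEncryptedObjects: &SseKmsEncryptedObjects{
+//			Status: aws.String(SseKmsEncryptedObjectsStatusEnabled),
+//		},
+//	}
+//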
+func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } + } - // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA or GLACIER storage class. If your bucket is - // versioning-enabled (or versioning is suspended), you can set this action - // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA - // or GLACIER storage class at a specific period in the object's lifetime. - NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Prefix identifying one or more objects to which the rule applies. - // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. +// A container for filter information for the selection of S3 objects encrypted +// with AWS KMS. +type SseKmsEncryptedObjects struct { + _ struct{} `type:"structure"` + + // If the status is not Enabled, replication for S3 objects encrypted with AWS + // KMS is disabled. // // Status is a required field - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - - Transition *Transition `type:"structure"` + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` } // String returns the string representation -func (s Rule) String() string { +func (s SseKmsEncryptedObjects) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Rule) GoString() string { +func (s SseKmsEncryptedObjects) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Rule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Rule"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} if s.Status == nil { invalidParams.Add(request.NewErrParamRequired("Status")) } @@ -17752,55 +23007,93 @@ func (s *Rule) Validate() error { return nil } -// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. -func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { - s.AbortIncompleteMultipartUpload = v +// SetStatus sets the Status field's value. +func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v return s } -// SetExpiration sets the Expiration field's value. -func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { - s.Expiration = v - return s +type Stats struct { + _ struct{} `type:"structure"` + + // The total number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The total number of bytes of records payload data returned. 
+	BytesReturned *int64 `type:"long"`
+
+	// The total number of object bytes scanned.
+	BytesScanned *int64 `type:"long"`
 }
 
-// SetID sets the ID field's value.
-func (s *Rule) SetID(v string) *Rule {
-	s.ID = &v
-	return s
+// String returns the string representation
+func (s Stats) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
-func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule {
-	s.NoncurrentVersionExpiration = v
-	return s
+// GoString returns the string representation
+func (s Stats) GoString() string {
+	return s.String()
 }
 
-// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value.
-func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule {
-	s.NoncurrentVersionTransition = v
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Stats) SetBytesProcessed(v int64) *Stats {
+	s.BytesProcessed = &v
 	return s
 }
 
-// SetPrefix sets the Prefix field's value.
-func (s *Rule) SetPrefix(v string) *Rule {
-	s.Prefix = &v
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Stats) SetBytesReturned(v int64) *Stats {
+	s.BytesReturned = &v
 	return s
 }
 
-// SetStatus sets the Status field's value.
-func (s *Rule) SetStatus(v string) *Rule {
-	s.Status = &v
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Stats) SetBytesScanned(v int64) *Stats {
+	s.BytesScanned = &v
 	return s
 }
 
-// SetTransition sets the Transition field's value.
-func (s *Rule) SetTransition(v *Transition) *Rule {
-	s.Transition = v
+type StatsEvent struct {
+	_ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
+
+	// The Stats event details.
+	Details *Stats `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation
+func (s StatsEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StatsEvent) GoString() string {
+	return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent {
+	s.Details = v
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis
+// The StatsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *StatsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *StatsEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	if err := payloadUnmarshaler.UnmarshalPayload(
+		bytes.NewReader(msg.Payload), s,
+	); err != nil {
+		return err
+	}
+	return nil
+}
+
 type StorageClassAnalysis struct {
 	_ struct{} `type:"structure"`
 
@@ -17840,7 +23133,6 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport)
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport
 type StorageClassAnalysisDataExport struct {
 	_ struct{} `type:"structure"`
 
@@ -17898,7 +23190,6 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag
 type Tag struct {
 	_ struct{} `type:"structure"`
 
@@ -17954,7 +23245,6 @@ func (s *Tag) SetValue(v string) *Tag {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging
 type Tagging struct {
 	_ struct{} `type:"structure"`
 
@@ -18001,11 +23291,10 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant
 type TargetGrant struct {
 	_ struct{} `type:"structure"`
 
-	Grantee *Grantee `type:"structure"`
+	Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
 
 	// Logging permissions assigned to the Grantee for the bucket.
 	Permission *string `type:"string" enum:"BucketLogsPermission"`
@@ -18048,25 +23337,26 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant {
 	return s
 }
 
-// Container for specifying the configuration when you want Amazon S3 to publish
-// events to an Amazon Simple Notification Service (Amazon SNS) topic.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration
+// A container for specifying the configuration for publication of messages
+// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3
+// detects specified events.
 type TopicConfiguration struct {
 	_ struct{} `type:"structure"`
 
 	// Events is a required field
 	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
 
-	// Container for object key name filtering rules. For information about key
-	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+	// A container for object key name filtering rules. For information about key
+	// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+	// in the Amazon Simple Storage Service Developer Guide.
 	Filter *NotificationConfigurationFilter `type:"structure"`
 
-	// Optional unique identifier for configurations in a notification configuration.
+	// An optional unique identifier for configurations in a notification configuration.
 	// If you don't provide one, Amazon S3 will assign an ID.
 	Id *string `type:"string"`
 
-	// Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects
-	// events of specified type.
+	// The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3
+	// will publish a message when it detects events of the specified type.
// // TopicArn is a required field TopicArn *string `locationName:"Topic" type:"string" required:"true"` @@ -18122,16 +23412,17 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` // Bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` Events []*string `locationName:"Event" type:"list" flattened:"true"` - // Optional unique identifier for configurations in a notification configuration. + // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` @@ -18174,7 +23465,6 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition type Transition struct { _ struct{} `type:"structure"` @@ -18218,7 +23508,6 @@ func (s *Transition) SetStorageClass(v string) *Transition { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest type UploadPartCopyInput struct { _ struct{} `type:"structure"` @@ -18235,14 +23524,14 @@ type UploadPartCopyInput struct { CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // The range of bytes to copy from the source object. The range value must use // the form bytes=first-last, where the first and last are the zero-based byte @@ -18257,7 +23546,7 @@ type UploadPartCopyInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -18288,7 +23577,7 @@ type UploadPartCopyInput struct { // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. 
This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -18317,6 +23606,9 @@ func (s *UploadPartCopyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.CopySource == nil { invalidParams.Add(request.NewErrParamRequired("CopySource")) } @@ -18345,6 +23637,13 @@ func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCopySource sets the CopySource field's value. func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { s.CopySource = &v @@ -18393,6 +23692,13 @@ func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartC return s } +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + // SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { s.CopySourceSSECustomerKeyMD5 = &v @@ -18429,6 +23735,13 @@ func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { s.SSECustomerKeyMD5 = &v @@ -18441,7 +23754,6 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` @@ -18467,7 +23779,7 @@ type UploadPartCopyOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -18526,7 +23838,6 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest type UploadPartInput struct { _ struct{} `type:"structure" payload:"Body"` @@ -18542,6 +23853,9 @@ type UploadPartInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // The base64-encoded 128-bit MD5 digest of the part data. 
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // Object key for which the multipart upload was initiated. // // Key is a required field @@ -18568,7 +23882,7 @@ type UploadPartInput struct { // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -18597,6 +23911,9 @@ func (s *UploadPartInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -18628,12 +23945,25 @@ func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { return s } +func (s *UploadPartInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContentLength sets the ContentLength field's value. func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { s.ContentLength = &v return s } +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + // SetKey sets the Key field's value. func (s *UploadPartInput) SetKey(v string) *UploadPartInput { s.Key = &v @@ -18664,6 +23994,13 @@ func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { return s } +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { s.SSECustomerKeyMD5 = &v @@ -18676,7 +24013,6 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -18699,7 +24035,7 @@ type UploadPartOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). 
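
The new ContentMD5 field above carries the base64-encoded 128-bit MD5 digest of the part body. For illustration only (not part of the vendored SDK), a minimal sketch of producing such a value; partData is a placeholder for the bytes of one multipart-upload part:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// partData stands in for the body of a single part.
	partData := []byte("example part body")

	// MD5 digest of the part, base64-encoded as the Content-MD5
	// header (and the ContentMD5 field) expects.
	sum := md5.Sum(partData)
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
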
@@ -18752,7 +24088,6 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -18787,7 +24122,6 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration type WebsiteConfiguration struct { _ struct{} `type:"structure"` @@ -18950,6 +24284,25 @@ const ( BucketVersioningStatusSuspended = "Suspended" ) +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "NONE" + + // CompressionTypeGzip is a CompressionType enum value + CompressionTypeGzip = "GZIP" + + // CompressionTypeBzip2 is a CompressionType enum value + CompressionTypeBzip2 = "BZIP2" +) + +const ( + // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusEnabled = "Enabled" + + // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusDisabled = "Disabled" +) + // Requests Amazon S3 to encode the object keys in the response and specifies // the encoding method to use. An object key may contain any Unicode character; // however, XML 1.0 parser cannot parse some characters, such as characters @@ -18961,7 +24314,7 @@ const ( EncodingTypeUrl = "url" ) -// Bucket event for which to send notifications. +// The bucket event for which to send notifications. const ( // EventS3ReducedRedundancyLostObject is a Event enum value EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" @@ -18989,6 +24342,12 @@ const ( // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + + // EventS3ObjectRestorePost is a Event enum value + EventS3ObjectRestorePost = "s3:ObjectRestore:Post" + + // EventS3ObjectRestoreCompleted is a Event enum value + EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" ) const ( @@ -18999,6 +24358,22 @@ const ( ExpirationStatusDisabled = "Disabled" ) +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + const ( // FilterRuleNamePrefix is a FilterRuleName enum value FilterRuleNamePrefix = "prefix" @@ -19010,6 +24385,12 @@ const ( const ( // InventoryFormatCsv is a InventoryFormat enum value InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" + + // InventoryFormatParquet is a InventoryFormat enum value + InventoryFormatParquet = "Parquet" ) const ( @@ -19046,6 +24427,26 @@ const ( // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" + + // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" + + // 
InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockMode = "ObjectLockMode" + + // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" +) + +const ( + // JSONTypeDocument is a JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a JSONType enum value + JSONTypeLines = "LINES" ) const ( @@ -19095,6 +24496,35 @@ const ( ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" ) +const ( + // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value + ObjectLockEnabledEnabled = "Enabled" +) + +const ( + // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOn = "ON" + + // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOff = "OFF" +) + +const ( + // ObjectLockModeGovernance is a ObjectLockMode enum value + ObjectLockModeGovernance = "GOVERNANCE" + + // ObjectLockModeCompliance is a ObjectLockMode enum value + ObjectLockModeCompliance = "COMPLIANCE" +) + +const ( + // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeGovernance = "GOVERNANCE" + + // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeCompliance = "COMPLIANCE" +) + const ( // ObjectStorageClassStandard is a ObjectStorageClass enum value ObjectStorageClassStandard = "STANDARD" @@ -19104,6 +24534,15 @@ const ( // ObjectStorageClassGlacier is a ObjectStorageClass enum value ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" ) const ( @@ -19111,6 +24550,11 @@ const ( ObjectVersionStorageClassStandard = "STANDARD" ) +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + const ( // PayerRequester is a Payer enum value PayerRequester = "Requester" @@ -19144,6 +24588,14 @@ const ( ProtocolHttps = "https" ) +const ( + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value + QuoteFieldsAsneeded = "ASNEEDED" +) + const ( // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value ReplicationRuleStatusEnabled = "Enabled" @@ -19182,6 +24634,11 @@ const ( RequestPayerRequester = "requester" ) +const ( + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + const ( // ServerSideEncryptionAes256 is a ServerSideEncryption enum value ServerSideEncryptionAes256 = "AES256" @@ -19190,6 +24647,14 @@ const ( ServerSideEncryptionAwsKms = "aws:kms" ) +const ( + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + const ( // StorageClassStandard is a StorageClass enum value StorageClassStandard = "STANDARD" @@ -19199,6 +24664,15 @@ const ( // StorageClassStandardIa is a StorageClass 
enum value
 	StorageClassStandardIa = "STANDARD_IA"
+
+	// StorageClassOnezoneIa is a StorageClass enum value
+	StorageClassOnezoneIa = "ONEZONE_IA"
+
+	// StorageClassIntelligentTiering is a StorageClass enum value
+	StorageClassIntelligentTiering = "INTELLIGENT_TIERING"
+
+	// StorageClassGlacier is a StorageClass enum value
+	StorageClassGlacier = "GLACIER"
 )
 
 const (
@@ -19231,6 +24705,12 @@ const (
 	// TransitionStorageClassStandardIa is a TransitionStorageClass enum value
 	TransitionStorageClassStandardIa = "STANDARD_IA"
+
+	// TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value
+	TransitionStorageClassOnezoneIa = "ONEZONE_IA"
+
+	// TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value
+	TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING"
 )
 
 const (
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
new file mode 100644
index 0000000000..5c8ce5cc8a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
@@ -0,0 +1,249 @@
+package s3
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+const (
+	contentMD5Header    = "Content-Md5"
+	contentSha256Header = "X-Amz-Content-Sha256"
+	amzTeHeader         = "X-Amz-Te"
+	amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
+
+	appendMD5TxEncoding = "append-md5"
+)
+
+// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func contentMD5(r *request.Request) {
+	h := md5.New()
+
+	if !aws.IsReaderSeekable(r.Body) {
+		if r.Config.Logger != nil {
+			r.Config.Logger.Log(fmt.Sprintf(
+				"Unable to compute Content-MD5 for unseekable body, S3.%s",
+				r.Operation.Name))
+		}
+		return
+	}
+
+	if _, err := copySeekableBody(h, r.Body); err != nil {
+		r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
+		return
+	}
+
+	// encode the md5 checksum in base64 and set the request header.
+	v := base64.StdEncoding.EncodeToString(h.Sum(nil))
+	r.HTTPRequest.Header.Set(contentMD5Header, v)
+}
+
+// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
+// request. If the body is not seekable, or S3DisableContentMD5Validation is
+// set, this handler will be ignored.
+func computeBodyHashes(r *request.Request) {
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+	if r.IsPresigned() {
+		return
+	}
+	if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
+		return
+	}
+
+	var md5Hash, sha256Hash hash.Hash
+	hashers := make([]io.Writer, 0, 2)
+
+	// Determine upfront which hashes can be set without overriding
+	// user-provided header data.
+	if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
+		md5Hash = md5.New()
+		hashers = append(hashers, md5Hash)
+	}
+
+	if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
+		sha256Hash = sha256.New()
+		hashers = append(hashers, sha256Hash)
+	}
+
+	// Create the destination writer based on the hashes that are not already
+	// provided by the user.
+	var dst io.Writer
+	switch len(hashers) {
+	case 0:
+		return
+	case 1:
+		dst = hashers[0]
+	default:
+		dst = io.MultiWriter(hashers...)
+	}
+
+	if _, err := copySeekableBody(dst, r.Body); err != nil {
+		r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
+		return
+	}
+
+	// For the hashes created, set the associated headers that the user did not
+	// already provide.
+	if md5Hash != nil {
+		sum := make([]byte, md5.Size)
+		encoded := make([]byte, md5Base64EncLen)
+
+		base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
+		r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
+	}
+
+	if sha256Hash != nil {
+		encoded := make([]byte, sha256HexEncLen)
+		sum := make([]byte, sha256.Size)
+
+		hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
+		r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
+	}
+}
+
+const (
+	md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
+	sha256HexEncLen = sha256.Size * 2        // hex.EncodedLen
+)
+
+func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
+	curPos, err := src.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	// hash the body. seek back to the first position after reading to reset
+	// the body for transmission. copy errors may be assumed to be from the
+	// body.
+	n, err := io.Copy(dst, src)
+	if err != nil {
+		return n, err
+	}
+
+	_, err = src.Seek(curPos, sdkio.SeekStart)
+	if err != nil {
+		return n, err
+	}
+
+	return n, nil
+}
+
+// Adds the x-amz-te: append-md5 header to the request. This requests that the
+// service respond with a trailing MD5 checksum.
+//
+// Will not ask for append MD5 if disabled, the request is presigned,
+// or the API operation does not support content MD5 validation.
+func askForTxEncodingAppendMD5(r *request.Request) {
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+	if r.IsPresigned() {
+		return
+	}
+	r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
+}
+
+func useMD5ValidationReader(r *request.Request) {
+	if r.Error != nil {
+		return
+	}
+
+	if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
+		return
+	}
+
+	var bodyReader *io.ReadCloser
+	var contentLen int64
+	switch tv := r.Data.(type) {
+	case *GetObjectOutput:
+		bodyReader = &tv.Body
+		contentLen = aws.Int64Value(tv.ContentLength)
+		// Update ContentLength to hide the trailing MD5 checksum.
+		tv.ContentLength = aws.Int64(contentLen - md5.Size)
+		tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
+	default:
+		r.Error = awserr.New("ChecksumValidationError",
+			fmt.Sprintf("%s: %s header received on unsupported API, %s",
+				amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
+			), nil)
+		return
+	}
+
+	if contentLen < md5.Size {
+		r.Error = awserr.New("ChecksumValidationError",
+			fmt.Sprintf("invalid Content-Length %d for %s %s",
+				contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
+			), nil)
+		return
+	}
+
+	// Wrap and swap the response body reader with the validation reader.
+ *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go deleted file mode 100644 index 9fc5df94d3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go +++ /dev/null @@ -1,36 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - _, err := io.Copy(h, r.Body) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to read body", err) - return - } - _, err = r.Body.Seek(0, 0) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to seek body", err) - return - } - - // encode the md5 checksum in base64 and set the request header. 
- sum := h.Sum(nil) - sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) - base64.StdEncoding.Encode(sum64, sum) - r.HTTPRequest.Header.Set("Content-MD5", string(sum64)) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 8463347230..95f2456363 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -3,6 +3,7 @@ package s3 import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3err" ) func init() { @@ -21,6 +22,7 @@ func defaultInitClientFn(c *client.Client) { // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.Clear() c.Handlers.UnmarshalError.PushBack(unmarshalError) + c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) } func defaultInitRequestFn(r *request.Request) { @@ -31,6 +33,7 @@ func defaultInitRequestFn(r *request.Request) { switch r.Operation.Name { case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, + opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration, opPutBucketReplication: // These S3 operations require Content-MD5 to be set r.Handlers.Build.PushBack(contentMD5) @@ -42,5 +45,30 @@ func defaultInitRequestFn(r *request.Request) { r.Handlers.Validate.PushFront(populateLocationConstraint) case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) + case opPutObject, opUploadPart: + r.Handlers.Build.PushBack(computeBodyHashes) + // Disabled until #1837 root issue is resolved. + // case opGetObject: + // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) + // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) } } + +// bucketGetter is an accessor interface to grab the "Bucket" field from +// an S3 type. +type bucketGetter interface { + getBucket() string +} + +// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" +// field from an S3 type. +type sseCustomerKeyGetter interface { + getSSECustomerKey() string +} + +// copySourceSSECustomerKeyGetter is an accessor interface to grab the +// "CopySourceSSECustomerKey" field from an S3 type. +type copySourceSSECustomerKeyGetter interface { + getCopySourceSSECustomerKey() string +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go index f045fd0db9..0def02255a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go @@ -10,69 +10,17 @@ // // Using the Client // -// To use the client for Amazon Simple Storage Service you will first need -// to create a new instance of it. +// To contact Amazon Simple Storage Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. // -// When creating a client for an AWS service you'll first need to have a Session -// already created. The Session provides configuration that can be shared -// between multiple service clients. Additional configuration can be applied to -// the Session and service's client when they are constructed. 
The aws package's
-// Config type contains several fields such as Region for the AWS Region the
-// client should make API requests too. The optional Config value can be provided
-// as the variadic argument for Sessions and client creation.
-//
-// Once the service's client is created you can use it to make API requests the
-// AWS service. These clients are safe to use concurrently.
-//
-//   // Create a session to share configuration, and load external configuration.
-//   sess := session.Must(session.NewSession())
-//
-//   // Create the service's client with the session.
-//   svc := s3.New(sess)
-//
-// See the SDK's documentation for more information on how to use service clients.
+// See the SDK's documentation for more information on how to use the SDK.
 // https://docs.aws.amazon.com/sdk-for-go/api/
 //
-// See aws package's Config type for more information on configuration options.
+// See aws.Config documentation for more information on configuring SDK clients.
 // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
 //
 // See the Amazon Simple Storage Service client S3 for more
-// information on creating the service's client.
+// information on creating a client for this service.
 // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
-//
-// Once the client is created you can make an API request to the service.
-// Each API method takes a input parameter, and returns the service response
-// and an error.
-//
-// The API method will document which error codes the service can be returned
-// by the operation if the service models the API operation's errors. These
-// errors will also be available as const strings prefixed with "ErrCode".
-//
-//   result, err := svc.AbortMultipartUpload(params)
-//   if err != nil {
-//       // Cast err to awserr.Error to handle specific error codes.
-//       aerr, ok := err.(awserr.Error)
-//       if ok && aerr.Code() == {
-//           // Specific error code handling
-//       }
-//       return err
-//   }
-//
-//   fmt.Println("AbortMultipartUpload result:")
-//   fmt.Println(result)
-//
-// Using the Client with Context
-//
-// The service's client also provides methods to make API requests with a Context
-// value. This allows you to control the timeout, and cancellation of pending
-// requests. These methods also take request Option as variadic parameter to apply
-// additional configuration to the API request.
-//
-//   ctx := context.Background()
-//
-//   result, err := svc.AbortMultipartUploadWithContext(ctx, params)
-//
-// See the request package documentation for more information on using Context pattern
-// with the SDK.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
 package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
index b794a63ba2..39b912c260 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -35,7 +35,7 @@
 //
 // The s3manager package's Downloader provides concurrently downloading of Objects
 // from S3. The Downloader will write S3 Object content with an io.WriterAt.
-// Once the Downloader instance is created you can call Upload concurrently from
+// Once the Downloader instance is created you can call Download concurrently from
 // multiple goroutines safely.
//
 // // The session the S3 Downloader will use
 // sess := session.Must(session.NewSession())
 //
 // // Create a downloader with the session and default options
 // downloader := s3manager.NewDownloader(sess)
@@ -56,7 +56,7 @@
 //         Key: aws.String(myString),
 //     })
 //     if err != nil {
-//         return fmt.Errorf("failed to upload file, %v", err)
+//         return fmt.Errorf("failed to download file, %v", err)
 //     }
 //     fmt.Printf("file downloaded, %d bytes\n", n)
 //
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
index ec3ffe4484..a7fbc2de2f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -8,7 +8,6 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
@@ -113,15 +112,9 @@ func updateEndpointForAccelerate(r *request.Request) {
 // Attempts to retrieve the bucket name from the request input parameters.
 // If no bucket is found, or the field is empty "", false will be returned.
 func bucketNameFromReqParams(params interface{}) (string, bool) {
-	b, _ := awsutil.ValuesAtPath(params, "Bucket")
-	if len(b) == 0 {
-		return "", false
-	}
-
-	if bucket, ok := b[0].(*string); ok {
-		if bucketStr := aws.StringValue(bucket); bucketStr != "" {
-			return bucketStr, true
-		}
+	if iface, ok := params.(bucketGetter); ok {
+		b := iface.getBucket()
+		return b, len(b) > 0
 	}
 
 	return "", false
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
index 614e477d3b..d17dcc9dad 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -29,8 +29,9 @@ var initRequest func(*request.Request)
 
 // Service information constants
 const (
-	ServiceName = "s3"        // Service endpoint prefix API calls made to.
-	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+	ServiceName = "s3"        // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "S3"        // ServiceID is a unique identifier of a specific service.
 )
 
 // New creates a new instance of the S3 client with a session.
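
The bucketNameFromReqParams hunk above replaces the reflection-based awsutil.ValuesAtPath lookup with a type assertion against the small bucketGetter interface this patch introduces in customizations.go. As an illustration only (exampleInput and bucketName are hypothetical stand-ins, not SDK code), a self-contained sketch of that pattern:

package main

import "fmt"

// bucketGetter mirrors the accessor interface the patch adds; generated
// input types with a Bucket field satisfy it via getBucket().
type bucketGetter interface {
	getBucket() string
}

// exampleInput stands in for a generated type such as UploadPartInput.
type exampleInput struct {
	Bucket *string
}

// getBucket dereferences the field nil-safely, returning "" when unset.
func (s *exampleInput) getBucket() (v string) {
	if s.Bucket == nil {
		return v
	}
	return *s.Bucket
}

// bucketName performs the same interface check the new
// bucketNameFromReqParams uses, avoiding reflection entirely.
func bucketName(params interface{}) (string, bool) {
	if iface, ok := params.(bucketGetter); ok {
		b := iface.getBucket()
		return b, len(b) > 0
	}
	return "", false
}

func main() {
	b := "my-bucket"
	fmt.Println(bucketName(&exampleInput{Bucket: &b})) // my-bucket true
	fmt.Println(bucketName(&exampleInput{}))           // false
}
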
@@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, @@ -65,12 +67,16 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) { + s.DisableURIPathEscaping = true + })) svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) + // Run custom client initialization if present if initClient != nil { initClient(svc.Client) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go index 268ea2fb45..8010c4fa19 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -5,17 +5,27 @@ import ( "encoding/base64" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) func validateSSERequiresSSL(r *request.Request) { - if r.HTTPRequest.URL.Scheme != "https" { - p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") - if len(p) > 0 { + if r.HTTPRequest.URL.Scheme == "https" { + return + } + + if iface, ok := r.Params.(sseCustomerKeyGetter); ok { + if len(iface.getSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } + + if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + if len(iface.getCopySourceSSECustomerKey()) > 0 { r.Error = errSSERequiresSSL + return } } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index 5a78fd3370..fde3050f95 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -7,17 +7,22 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" ) func copyMultipartStatusOKUnmarhsalError(r *request.Request) { b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "unable to read response body", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } body := bytes.NewReader(b) r.HTTPResponse.Body = ioutil.NopCloser(body) - defer body.Seek(0, 0) + defer body.Seek(0, sdkio.SeekStart) if body.Len() == 0 { // If there is no body don't attempt to parse the body. 
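
The md5ValidationReader added in body_hash.go earlier in this patch hashes the payload through an io.TeeReader layered over an io.LimitedReader, then compares the digest against the trailing checksum bytes left on the raw stream. A self-contained sketch of that technique under illustrative data (the simulated stream and payload are placeholders, not SDK code):

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	payload := []byte("hello, world")
	sum := md5.Sum(payload)

	// Simulate an append-md5 response body: payload followed by its checksum.
	stream := bytes.NewReader(append(payload, sum[:]...))

	// Hash the payload bytes as the consumer reads them; the LimitedReader
	// stops short of the trailing checksum.
	h := md5.New()
	body, _ := ioutil.ReadAll(io.TeeReader(
		&io.LimitedReader{R: stream, N: int64(len(payload))}, h))

	// Whatever remains on the raw stream is the expected checksum.
	expect := make([]byte, md5.Size)
	if _, err := io.ReadFull(stream, expect); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Printf("%s valid=%v\n", body, bytes.Equal(expect, h.Sum(nil)))
}
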
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index bcca8627af..12c0612c8d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -23,22 +23,17 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) - hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2") - // Bucket exists in a different region, and request needs // to be made to the correct region. if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { - r.Error = requestFailure{ - RequestFailure: awserr.NewRequestFailure( - awserr.New("BucketRegionError", - fmt.Sprintf("incorrect region, the bucket is not in '%s' region", - aws.StringValue(r.Config.Region)), - nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ), - hostID: hostID, - } + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", + fmt.Sprintf("incorrect region, the bucket is not in '%s' region", + aws.StringValue(r.Config.Region)), + nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } @@ -63,14 +58,11 @@ func unmarshalError(r *request.Request) { errMsg = statusText } - r.Error = requestFailure{ - RequestFailure: awserr.NewRequestFailure( - awserr.New(errCode, errMsg, err), - r.HTTPResponse.StatusCode, - r.RequestID, - ), - hostID: hostID, - } + r.Error = awserr.NewRequestFailure( + awserr.New(errCode, errMsg, err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) } // A RequestFailure provides access to the S3 Request ID and Host ID values @@ -83,21 +75,3 @@ type RequestFailure interface { // Host ID is the S3 Host ID needed for debug, and contacting support HostID() string } - -type requestFailure struct { - awserr.RequestFailure - - hostID string -} - -func (r requestFailure) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s", - r.StatusCode(), r.RequestID(), r.hostID) - return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} -func (r requestFailure) String() string { - return r.Error() -} -func (r requestFailure) HostID() string { - return r.hostID -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go index cccfa8c2b3..2596c694b5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -11,7 +11,7 @@ import ( // WaitUntilBucketExists uses the Amazon S3 API operation // HeadBucket to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) @@ -72,7 +72,7 @@ func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucket // WaitUntilBucketNotExists uses the Amazon S3 API operation // HeadBucket to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. 
func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
 	return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
@@ -118,7 +118,7 @@ func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBuc
 
 // WaitUntilObjectExists uses the Amazon S3 API operation
 // HeadObject to wait for a condition to be met before returning.
-// If the condition is not meet within the max attempt window an error will
+// If the condition is not met within the max attempt window, an error will
 // be returned.
 func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
 	return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
@@ -169,7 +169,7 @@ func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObject
 
 // WaitUntilObjectNotExists uses the Amazon S3 API operation
 // HeadObject to wait for a condition to be met before returning.
-// If the condition is not meet within the max attempt window an error will
+// If the condition is not met within the max attempt window, an error will
 // be returned.
 func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
 	return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index e5c105fed8..ee908f9167 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -14,19 +14,18 @@ const opAssumeRole = "AssumeRole"
 
 // AssumeRoleRequest generates a "aws/request.Request" representing the
 // client's request for the AssumeRole operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See AssumeRole for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AssumeRole method directly
-// instead.
+// See AssumeRole for more information on using the AssumeRole
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the AssumeRoleRequest method.
// req, resp := client.AssumeRoleRequest(params) @@ -36,7 +35,7 @@ const opAssumeRole = "AssumeRole" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { op := &request.Operation{ Name: opAssumeRole, @@ -89,9 +88,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) // in the IAM User Guide. // -// The temporary security credentials are valid for the duration that you specified -// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a -// maximum of 3600 seconds (1 hour). The default is 1 hour. +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // // The temporary security credentials created by AssumeRole can be used to make // API calls to any AWS service with the following exception: you cannot call @@ -122,7 +130,12 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // the user to call AssumeRole on the ARN of the role in the other account. // If the user is in the same account as the role, then you can either attach // a policy to the user (identical to the previous different account user), -// or you can add the user as a principal directly in the role's trust policy +// or you can add the user as a principal directly in the role's trust policy. +// In this case, the trust policy acts as the only resource-based policy in +// IAM, and users in the same account as the role do not need explicit permission +// to assume the role. For more information about trust policies and resource-based +// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. // // Using MFA with AssumeRole // @@ -169,7 +182,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
//
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
 func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
 	req, out := c.AssumeRoleRequest(input)
 	return out, req.Send()
@@ -195,19 +208,18 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
 
 // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
 // client's request for the AssumeRoleWithSAML operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See AssumeRoleWithSAML for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AssumeRoleWithSAML method directly
-// instead.
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the AssumeRoleWithSAMLRequest method.
 //    req, resp := client.AssumeRoleWithSAMLRequest(params)
@@ -217,7 +229,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
 func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
 	op := &request.Operation{
 		Name:       opAssumeRoleWithSAML,
@@ -249,11 +261,20 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 // an access key ID, a secret access key, and a security token. Applications
 // can use these temporary security credentials to sign calls to AWS services.
 //
-// The temporary security credentials are valid for the duration that you specified
-// when calling AssumeRole, or until the time specified in the SAML authentication
-// response's SessionNotOnOrAfter value, whichever is shorter. The duration
-// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour).
-// The default is 1 hour.
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours.
To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any AWS service with the following exception: you cannot @@ -343,7 +364,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { req, out := c.AssumeRoleWithSAMLRequest(input) return out, req.Send() @@ -369,19 +390,18 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See AssumeRoleWithWebIdentity for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRoleWithWebIdentity method directly -// instead. +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 
// req, resp := client.AssumeRoleWithWebIdentityRequest(params) @@ -391,7 +411,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { op := &request.Operation{ Name: opAssumeRoleWithWebIdentity, @@ -441,9 +461,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // key ID, a secret access key, and a security token. Applications can use these // temporary security credentials to sign calls to AWS service APIs. // -// The credentials are valid for the duration that you specified when calling -// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to -// a maximum of 3600 seconds (1 hour). The default is 1 hour. +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: @@ -495,7 +524,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // the information from these providers to get and use temporary security // credentials. // -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). // This article discusses web identity federation and shows an example of // how to use web identity federation to get access to content in Amazon // S3. @@ -546,7 +575,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
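Besides the session-duration rewording, this hunk corrects the dead article URL for web identity federation. A sketch of the call itself, assuming a configured client; the role ARN is a hypothetical placeholder and the token comes from an external identity provider:

```go
package stsexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithWebIdentity exchanges an identity-provider token for temporary
// AWS credentials.
func assumeWithWebIdentity(svc *sts.STS, token string) (*sts.Credentials, error) {
	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-demo"),
		RoleSessionName:  aws.String("web-identity-example"),
		WebIdentityToken: aws.String(token),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}
```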
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { req, out := c.AssumeRoleWithWebIdentityRequest(input) return out, req.Send() @@ -572,19 +601,18 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the // client's request for the DecodeAuthorizationMessage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See DecodeAuthorizationMessage for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DecodeAuthorizationMessage method directly -// instead. +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DecodeAuthorizationMessageRequest method. // req, resp := client.DecodeAuthorizationMessageRequest(params) @@ -594,7 +622,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { op := &request.Operation{ Name: opDecodeAuthorizationMessage, @@ -659,7 +687,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // invalid. This can happen if the token contains invalid characters, such as // linebreaks. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { req, out := c.DecodeAuthorizationMessageRequest(input) return out, req.Send() @@ -685,19 +713,18 @@ const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the // client's request for the GetCallerIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
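DecodeAuthorizationMessage turns the opaque encoded blob from an access-denied response into readable JSON, provided the caller is allowed sts:DecodeAuthorizationMessage. A sketch, assuming a configured client:

```go
package stsexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// decodeDenial decodes the message attached to an authorization failure.
// Per the docs above, tokens containing invalid characters such as line
// breaks are rejected with InvalidAuthorizationMessageException.
func decodeDenial(svc *sts.STS, encoded string) (string, error) {
	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.DecodedMessage), nil
}
```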
+// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetCallerIdentity for usage and error information. +// See GetCallerIdentity for more information on using the GetCallerIdentity +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetCallerIdentity method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetCallerIdentityRequest method. // req, resp := client.GetCallerIdentityRequest(params) @@ -707,7 +734,7 @@ const opGetCallerIdentity = "GetCallerIdentity" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { op := &request.Operation{ Name: opGetCallerIdentity, @@ -735,7 +762,7 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // // See the AWS API reference guide for AWS Security Token Service's // API operation GetCallerIdentity for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { req, out := c.GetCallerIdentityRequest(input) return out, req.Send() @@ -761,19 +788,18 @@ const opGetFederationToken = "GetFederationToken" // GetFederationTokenRequest generates a "aws/request.Request" representing the // client's request for the GetFederationToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetFederationToken for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetFederationToken method directly -// instead. +// See GetFederationToken for more information on using the GetFederationToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
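GetCallerIdentity takes an empty input and reports which credentials signed the request, which makes it a convenient smoke test for a provider chain. A sketch, assuming a configured client:

```go
package stsexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// whoAmI prints the account, ARN, and user ID behind the calling credentials.
func whoAmI(svc *sts.STS) error {
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn), aws.StringValue(out.UserId))
	return nil
}
```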
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetFederationTokenRequest method. // req, resp := client.GetFederationTokenRequest(params) @@ -783,7 +809,7 @@ const opGetFederationToken = "GetFederationToken" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { op := &request.Operation{ Name: opGetFederationToken, @@ -905,7 +931,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { req, out := c.GetFederationTokenRequest(input) return out, req.Send() @@ -931,19 +957,18 @@ const opGetSessionToken = "GetSessionToken" // GetSessionTokenRequest generates a "aws/request.Request" representing the // client's request for the GetSessionToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetSessionToken for usage and error information. +// See GetSessionToken for more information on using the GetSessionToken +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetSessionToken method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetSessionTokenRequest method. // req, resp := client.GetSessionTokenRequest(params) @@ -953,7 +978,7 @@ const opGetSessionToken = "GetSessionToken" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { op := &request.Operation{ Name: opGetSessionToken, @@ -1034,7 +1059,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
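GetFederationToken issues credentials for a named federated user, optionally scoped down by an inline policy. A sketch, assuming a configured client; the policy JSON is a hypothetical S3 read-only example:

```go
package stsexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// federate requests credentials for a federated user scoped by an inline
// policy; permissions are the intersection of the caller's policy and this one.
func federate(svc *sts.STS) (*sts.Credentials, error) {
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:Get*","s3:List*"],"Resource":"*"}]}`
	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:   aws.String("demo-user"),
		Policy: aws.String(policy),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}
```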
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { req, out := c.GetSessionTokenRequest(input) return out, req.Send() @@ -1056,20 +1081,27 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken return out, req.Send() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest type AssumeRoleInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1248,7 +1280,6 @@ func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse type AssumeRoleOutput struct { _ struct{} `type:"structure"` @@ -1302,22 +1333,30 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest type AssumeRoleWithSAMLInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. 
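The GetSessionTokenInput fields referenced above (SerialNumber, TokenCode, and its SetTokenCode setter) are what MFA-protected session tokens use. A sketch, assuming a configured client; the MFA device ARN is a hypothetical placeholder:

```go
package stsexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// sessionWithMFA requests a session token gated on a one-time MFA code.
func sessionWithMFA(svc *sts.STS, code string) (*sts.Credentials, error) {
	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/demo"),
		TokenCode:    aws.String(code),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}
```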
An expiration can also be specified in the SAML authentication - // response's SessionNotOnOrAfter value. The actual expiration time is whichever - // value is shorter. + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Enabling SAML 2.0 Federated - // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1443,7 +1482,6 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML // Contains the response to a successful AssumeRoleWithSAML request, including // temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse type AssumeRoleWithSAMLOutput struct { _ struct{} `type:"structure"` @@ -1555,20 +1593,27 @@ func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest type AssumeRoleWithWebIdentityInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. 
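The same DurationSeconds rules apply to AssumeRoleWithSAMLInput, additionally bounded by the assertion's SessionNotOnOrAfter value. A sketch of the call, with hypothetical identity-provider and role ARNs and a base64-encoded assertion supplied by the caller:

```go
package stsexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithSAML exchanges a SAML assertion for temporary credentials.
func assumeWithSAML(svc *sts.STS, assertion string) (*sts.Credentials, error) {
	out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/demo-idp"),
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-demo"),
		SAMLAssertion: aws.String(assertion),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}
```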
For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1718,7 +1763,6 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo // Contains the response to a successful AssumeRoleWithWebIdentity request, // including temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse type AssumeRoleWithWebIdentityOutput struct { _ struct{} `type:"structure"` @@ -1811,7 +1855,6 @@ func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v strin // The identifiers for the temporary security credentials that the operation // returns. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser type AssumedRoleUser struct { _ struct{} `type:"structure"` @@ -1854,7 +1897,6 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { } // AWS credentials for API authentication. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials type Credentials struct { _ struct{} `type:"structure"` @@ -1866,7 +1908,7 @@ type Credentials struct { // The date on which the current credentials expire. // // Expiration is a required field - Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + Expiration *time.Time `type:"timestamp" required:"true"` // The secret access key that can be used to sign requests. 
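The Credentials struct above carries the four values needed to sign requests (note the hunk also drops the explicit iso8601 timestampFormat tag from Expiration). A sketch of feeding a returned Credentials value back into a fresh session via the SDK's static provider:

```go
package stsexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// sessionFromSTSCreds builds a new session from STS-issued credentials;
// note the static provider does not refresh them when they expire.
func sessionFromSTSCreds(c *sts.Credentials) (*session.Session, error) {
	return session.NewSession(&aws.Config{
		Credentials: credentials.NewStaticCredentials(
			aws.StringValue(c.AccessKeyId),
			aws.StringValue(c.SecretAccessKey),
			aws.StringValue(c.SessionToken),
		),
	})
}
```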
// @@ -1913,7 +1955,6 @@ func (s *Credentials) SetSessionToken(v string) *Credentials { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest type DecodeAuthorizationMessageInput struct { _ struct{} `type:"structure"` @@ -1958,7 +1999,6 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut // A document that contains additional information about the authorization status // of a request from an encoded message that is returned in response to an AWS // request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse type DecodeAuthorizationMessageOutput struct { _ struct{} `type:"structure"` @@ -1983,7 +2023,6 @@ func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAu } // Identifiers for the federated user that is associated with the credentials. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser type FederatedUser struct { _ struct{} `type:"structure"` @@ -2024,7 +2063,6 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -2041,7 +2079,6 @@ func (s GetCallerIdentityInput) GoString() string { // Contains the response to a successful GetCallerIdentity request, including // information about the entity making the request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse type GetCallerIdentityOutput struct { _ struct{} `type:"structure"` @@ -2087,7 +2124,6 @@ func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest type GetFederationTokenInput struct { _ struct{} `type:"structure"` @@ -2196,7 +2232,6 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse type GetFederationTokenOutput struct { _ struct{} `type:"structure"` @@ -2249,7 +2284,6 @@ func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest type GetSessionTokenInput struct { _ struct{} `type:"structure"` @@ -2334,7 +2368,6 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { // Contains the response to a successful GetSessionToken request, including // temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse type GetSessionTokenOutput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index d2af518cfa..ef681ab0c6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -56,69 +56,17 @@ // // Using the Client // -// To use the client for AWS Security Token Service you will first need -// to create a new instance of it. 
+// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. // -// When creating a client for an AWS service you'll first need to have a Session -// already created. The Session provides configuration that can be shared -// between multiple service clients. Additional configuration can be applied to -// the Session and service's client when they are constructed. The aws package's -// Config type contains several fields such as Region for the AWS Region the -// client should make API requests too. The optional Config value can be provided -// as the variadic argument for Sessions and client creation. -// -// Once the service's client is created you can use it to make API requests the -// AWS service. These clients are safe to use concurrently. -// -// // Create a session to share configuration, and load external configuration. -// sess := session.Must(session.NewSession()) -// -// // Create the service's client with the session. -// svc := sts.New(sess) -// -// See the SDK's documentation for more information on how to use service clients. +// See the SDK's documentation for more information on how to use the SDK. // https://docs.aws.amazon.com/sdk-for-go/api/ // -// See aws package's Config type for more information on configuration options. +// See aws.Config documentation for more information on configuring SDK clients. // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // // See the AWS Security Token Service client STS for more -// information on creating the service's client. +// information on creating client for this service. // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New -// -// Once the client is created you can make an API request to the service. -// Each API method takes a input parameter, and returns the service response -// and an error. -// -// The API method will document which error codes the service can be returned -// by the operation if the service models the API operation's errors. These -// errors will also be available as const strings prefixed with "ErrCode". -// -// result, err := svc.AssumeRole(params) -// if err != nil { -// // Cast err to awserr.Error to handle specific error codes. -// aerr, ok := err.(awserr.Error) -// if ok && aerr.Code() == { -// // Specific error code handling -// } -// return err -// } -// -// fmt.Println("AssumeRole result:") -// fmt.Println(result) -// -// Using the Client with Context -// -// The service's client also provides methods to make API requests with a Context -// value. This allows you to control the timeout, and cancellation of pending -// requests. These methods also take request Option as variadic parameter to apply -// additional configuration to the API request. -// -// ctx := context.Background() -// -// result, err := svc.AssumeRoleWithContext(ctx, params) -// -// See the request package documentation for more information on using Context pattern -// with the SDK. 
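The doc.go rewrite above removes the worked Context example. For reference, a sketch of the *WithContext pattern the removed text described, assuming a configured client; the 10-second timeout is an arbitrary choice:

```go
package stsexamples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithTimeout bounds the API call with a context deadline; the
// *WithContext variants cancel the pending request when the context expires.
func assumeWithTimeout(svc *sts.STS, input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return svc.AssumeRoleWithContext(ctx, input)
}
```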
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/ package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 1ee5839e04..185c914d1b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -29,8 +29,9 @@ var initRequest func(*request.Request) // Service information constants const ( - ServiceName = "sts" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. + ServiceName = "sts" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "STS" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the STS client with a session. @@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index c836416192..bc52e96f2b 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -2,7 +2,7 @@ ISC License Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, and distribute this software for any +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 8a4a6589a2..792994785e 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -16,7 +16,9 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which makes the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew @@ -34,80 +36,49 @@ const ( ptrSize = unsafe.Sizeof((*byte)(nil)) ) +type flag uintptr + var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary.
- flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. 
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 1fe3cf3d5d..205c28d68c 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,7 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe +// +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 7c519ff47a..1be8ce9457 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) { w.Write(closeParenBytes) } -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' // prefix to Writer w. func printHexPtr(w io.Writer, p uintptr) { // Null pointer. 
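The rewritten bypass stops hard-coding reflect.Value field offsets; it infers flagRO and flagAddr empirically in init and panics if the result matches none of the okFlags layouts. A sketch of what the bypass buys spew's callers, under the assumption that the package is built without the safe tag; the types here are hypothetical:

```go
package main

import "github.com/davecgh/go-spew/spew"

// id is a hypothetical type with a Stringer method.
type id int

func (i id) String() string { return "id-stringer" }

// record hides its field, so the value is ordinarily unaddressable
// and its methods cannot normally be invoked via reflection.
type record struct {
	key id
}

func main() {
	// With the unsafe bypass compiled in, spew can clear the read-only
	// flag on record.key and call its Stringer when dumping.
	spew.Dump(record{key: 7})
}
```

Built with -tags safe (or on pre-1.4 Go), bypasssafe.go is compiled instead and spew degrades to whatever access the reflect package permits.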
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index df1d582a72..f78d89fc1f 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -35,16 +35,16 @@ var ( // cCharRE is a regular expression that matches a cgo char. // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) // cUnsignedCharRE is a regular expression that matches a cgo unsigned // char. It is used to detect unsigned character arrays to hexdump // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) // cUint8tCharRE is a regular expression that matches a cgo uint8_t. // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) ) // dumpState contains information about the state of a dump operation. @@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) { // Display dereferenced value. d.w.Write(openParenBytes) switch { - case nilFound == true: + case nilFound: d.w.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: d.w.Write(circularBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index c49875bacb..b04edb7d7a 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) { // Display dereferenced value. switch { - case nilFound == true: + case nilFound: f.fs.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: f.fs.Write(circularShortBytes) default: diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore deleted file mode 100644 index c5203bf6e7..0000000000 --- a/vendor/github.com/go-ini/ini/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini -.idea diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml deleted file mode 100644 index 0064ba1d7c..0000000000 --- a/vendor/github.com/go-ini/ini/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -sudo: false -language: go - -go: - - 1.4 - - 1.5 - - 1.6 - - tip - -script: - - go get -v github.com/smartystreets/goconvey - - go test -v -cover -race - -notifications: - email: - - u@gogs.io diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/vendor/github.com/go-ini/ini/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. 
-For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile
deleted file mode 100644
index ac034e5258..0000000000
--- a/vendor/github.com/go-ini/ini/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-.PHONY: build test bench vet
-
-build: vet bench
-
-test:
-	go test -v -cover -race
-
-bench:
-	go test -v -cover -race -test.bench=. -test.benchmem
-
-vet:
-	go vet
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
deleted file mode 100644
index 22a42344aa..0000000000
--- a/vendor/github.com/go-ini/ini/README.md
+++ /dev/null
@@ -1,734 +0,0 @@
-INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini)
-===
-
-![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
-
-Package ini provides INI file read and write functionality in Go.
-
-[简体中文](README_ZH.md)
-
-## Features
-
-- Load multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
-- Read with recursive values.
-- Read with parent-child sections.
-- Read with auto-increment key names.
-- Read with multi-line values.
-- Read with tons of helper methods.
-- Read and convert values to Go types.
-- Read and **WRITE** comments of sections and keys.
-- Manipulate sections, keys and comments with ease.
-- Keep sections and keys in order as you parse and save.
-
-## Installation
-
-To use a tagged revision:
-
-	go get gopkg.in/ini.v1
-
-To use the latest changes:
-
-	go get github.com/go-ini/ini
-
-Add the `-u` flag to update in the future.
-
-### Testing
-
-If you want to run the tests on your machine, apply the `-t` flag:
-
-	go get -t gopkg.in/ini.v1
-
-Add the `-u` flag to update in the future.
-
-## Getting Started
-
-### Loading from data sources
-
-A **Data Source** is either raw data of type `[]byte`, a file name of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing any other type simply returns an error.
-
-```go
-cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
-```
-
-Or start with an empty object:
-
-```go
-cfg := ini.Empty()
-```
-
-When you cannot decide how many data sources to load at the beginning, you can still **Append()** them later.
-
-```go
-err := cfg.Append("other file", []byte("other raw data"))
-```
-
-If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
-
-```go
-cfg, err := ini.LooseLoad("filename", "filename_404")
-```
-
-The cool thing is, whenever a previously missing file becomes available by the time you call the `Reload` method, it will be loaded as usual.
-
-#### Ignore case in key names
-
-When you do not care about the case of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
-
-```go
-cfg, err := ini.InsensitiveLoad("filename")
-//...
-
-// sec1 and sec2 are exactly the same section object
-sec1, err := cfg.GetSection("Section")
-sec2, err := cfg.GetSection("SecTIOn")
-
-// key1 and key2 are exactly the same key object
-key1, err := cfg.GetKey("Key")
-key2, err := cfg.GetKey("KeY")
-```
-
-#### MySQL-like boolean keys
-
-MySQL's configuration allows a key without a value, as follows:
-
-```ini
-[mysqld]
-...
-skip-host-cache
-skip-name-resolve
-```
-
-By default, this is considered a missing value. But if you know you're going to deal with such cases, you can assign advanced load options:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
-```
-
-The value of those keys is always `true`, and when you save to a file, they are kept in the same format as you read them.
-
-#### Comment
-
-Take care that the following content is treated as comments:
-
-1. Lines that begin with `#` or `;`
-2. Words after `#` or `;`
-3. Words after a section name (i.e. words after `[some section name]`)
-
-If you want to save a value containing `#` or `;`, quote it with ``` ` ``` or ``` """ ```.
-
-### Working with sections
-
-To get a section, you would need to:
-
-```go
-section, err := cfg.GetSection("section name")
-```
-
-As a shortcut for the default section, just pass an empty string as the name:
-
-```go
-section, err := cfg.GetSection("")
-```
-
-When you're pretty sure the section exists, the following code can make your life easier:
-
-```go
-section := cfg.Section("section name")
-```
-
-What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
-
-To create a new section:
-
-```go
-err := cfg.NewSection("new section")
-```
-
-To get a list of sections or section names:
-
-```go
-sections := cfg.Sections()
-names := cfg.SectionStrings()
-```
-
-### Working with keys
-
-To get a key under a section:
-
-```go
-key, err := cfg.Section("").GetKey("key name")
-```
-
-The same shortcut rule applies to key operations:
-
-```go
-key := cfg.Section("").Key("key name")
-```
-
-To check if a key exists:
-
-```go
-yes := cfg.Section("").HasKey("key name")
-```
-
-To create a new key:
-
-```go
-err := cfg.Section("").NewKey("name", "value")
-```
-
-To get a list of keys or key names:
-
-```go
-keys := cfg.Section("").Keys()
-names := cfg.Section("").KeyStrings()
-```
-
-To get a cloned hash of keys and their corresponding values:
-
-```go
-hash := cfg.Section("").KeysHash()
-```
-
-### Working with values
-
-To get a string value:
-
-```go
-val := cfg.Section("").Key("key name").String()
-```
-
-To validate a key's value on the fly:
-
-```go
-val := cfg.Section("").Key("key name").Validate(func(in string) string {
-	if len(in) == 0 {
-		return "default"
-	}
-	return in
-})
-```
-
-If you do not want any auto-transformation (such as recursive reads) of the values, you can get the raw value directly (this gives much better performance):
-
-```go
-val := cfg.Section("").Key("key name").Value()
-```
-
-To check if a raw value exists:
-
-```go
-yes := cfg.Section("").HasValue("test value")
-```
-
-To get values of other types:
-
-```go
-// For boolean values:
-// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
-// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
-v, err = cfg.Section("").Key("BOOL").Bool()
-v, err = cfg.Section("").Key("FLOAT64").Float64()
-v, err = cfg.Section("").Key("INT").Int()
-v, err = cfg.Section("").Key("INT64").Int64()
-v, err = cfg.Section("").Key("UINT").Uint()
-v, err = cfg.Section("").Key("UINT64").Uint64()
-v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
-v, err = cfg.Section("").Key("TIME").Time() // RFC3339
-
-v = cfg.Section("").Key("BOOL").MustBool()
-v = cfg.Section("").Key("FLOAT64").MustFloat64()
-v = cfg.Section("").Key("INT").MustInt()
-v = cfg.Section("").Key("INT64").MustInt64()
-v = cfg.Section("").Key("UINT").MustUint()
-v = cfg.Section("").Key("UINT64").MustUint64()
-v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
-v = cfg.Section("").Key("TIME").MustTime() // RFC3339
-
-// Methods starting with Must also accept one argument as the default value
-// when the key is not found or the value fails to parse to the given type.
-// The exception is MustString, to which you always have to pass a default value.
-
-v = cfg.Section("").Key("String").MustString("default")
-v = cfg.Section("").Key("BOOL").MustBool(true)
-v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
-v = cfg.Section("").Key("INT").MustInt(10)
-v = cfg.Section("").Key("INT64").MustInt64(99)
-v = cfg.Section("").Key("UINT").MustUint(3)
-v = cfg.Section("").Key("UINT64").MustUint64(6)
-v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
-v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
-```
-
-What if my value is three lines long?
-
-```ini
-[advance]
-ADDRESS = """404 road,
-NotFound, State, 5000
-Earth"""
-```
-
-Not a problem!
-
-```go
-cfg.Section("advance").Key("ADDRESS").String()
-
-/* --- start ---
-404 road,
-NotFound, State, 5000
-Earth
------- end --- */
-```
-
-That's cool, but how about continuation lines?
-
-```ini
-[advance]
-two_lines = how about \
-	continuation lines?
-lots_of_lines = 1 \
-	2 \
-	3 \
-	4
-```
-
-Piece of cake!
-
-```go
-cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
-cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
-```
-
-Well, I hate continuation lines, how do I disable them?
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{
-	IgnoreContinuation: true,
-}, "filename")
-```
-
-Holy crap!
-
-Note that single or double quotes around values will be stripped:
-
-```ini
-foo = "some value" // foo: some value
-bar = 'some value' // bar: some value
-```
-
-That's all? Hmm, no.
-
-#### Helper methods for working with values
-
-To get a value from a set of candidates:
-
-```go
-v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
-v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
-v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
-v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
-v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9})
-v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9})
-v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
-v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
-```
-
-The default value is returned if the value of the key is not among the candidates you give; the default value does not need to be one of the candidates.
-
-To validate that a value is in a given range:
-
-```go
-vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
-vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
-vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
-vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
-vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
-vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
-vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
-```
-
-##### Auto-split values into a slice
-
-To use the zero value of the type for invalid inputs:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
-vals = cfg.Section("").Key("STRINGS").Strings(",")
-vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
-vals = cfg.Section("").Key("INTS").Ints(",")
-vals = cfg.Section("").Key("INT64S").Int64s(",")
-vals = cfg.Section("").Key("UINTS").Uints(",")
-vals = cfg.Section("").Key("UINT64S").Uint64s(",")
-vals = cfg.Section("").Key("TIMES").Times(",")
-```
-
-To exclude invalid values from the result slice:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> [2.2]
-vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
-vals = cfg.Section("").Key("INTS").ValidInts(",")
-vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
-vals = cfg.Section("").Key("UINTS").ValidUints(",")
-vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
-vals = cfg.Section("").Key("TIMES").ValidTimes(",")
-```
-
-Or to return nothing but an error when there are invalid inputs:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> error
-vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
-vals = cfg.Section("").Key("INTS").StrictInts(",")
-vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
-vals = cfg.Section("").Key("UINTS").StrictUints(",")
-vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
-vals = cfg.Section("").Key("TIMES").StrictTimes(",")
-```
-
-### Save your configuration
-
-Finally, it's time to save your configuration somewhere.
-
-A typical way to save a configuration is to write it to a file:
-
-```go
-// ...
-err = cfg.SaveTo("my.ini")
-err = cfg.SaveToIndent("my.ini", "\t")
-```
-
-Another way is to write to anything that implements the `io.Writer` interface:
-
-```go
-// ...
-cfg.WriteTo(writer)
-cfg.WriteToIndent(writer, "\t")
-```
-
-By default, spaces are used to align the "=" sign between keys and values; to disable that:
-
-```go
-ini.PrettyFormat = false
-```
-
-## Advanced Usage
-
-### Recursive Values
-
-For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section; `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
-
-```ini
-NAME = ini
-
-[author]
-NAME = Unknwon
-GITHUB = https://github.com/%(NAME)s
-
-[package]
-FULL_NAME = github.com/go-ini/%(NAME)s
-```
-
-```go
-cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
-cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
-```
-
-### Parent-child Sections
-
-You can use `.` in section names to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will look it up in the parent section, and keep going up until there is no parent section.
-
-```ini
-NAME = ini
-VERSION = v1
-IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
-
-[package]
-CLONE_URL = https://%(IMPORT_PATH)s
-
-[package.sub]
-```
-
-```go
-cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
-```
-
-#### Retrieve parent keys available to a child section
-
-```go
-cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
-```
-
-### Unparseable Sections
-
-Sometimes you have sections that contain raw content rather than key-value pairs. To handle such cases, use `LoadOptions.UnparseableSections`:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
-<1> This slide has the fuel listed in the wrong units `)
-
-body := cfg.Section("COMMENTS").Body()
-
-/* --- start ---
-<1> This slide has the fuel listed in the wrong units
------- end --- */
-```
-
-### Auto-increment Key Names
-
-If a key name is `-` in the data source, it is treated as special syntax for an auto-increment key name starting from 1, and each section has its own independent counter.
-
-```ini
-[features]
--: Support read/write comments of keys and sections
--: Support auto-increment of key names
--: Support load multiple files to overwrite key values
-```
-
-```go
-cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
-```
-
-### Map To Struct
-
-Want a more object-oriented way to play with INI? Cool.
-
-```ini
-Name = Unknwon
-age = 21
-Male = true
-Born = 1993-01-01T20:17:05Z
-
-[Note]
-Content = Hi is a good man!
-Cities = HangZhou, Boston
-```
-
-```go
-type Note struct {
-	Content string
-	Cities  []string
-}
-
-type Person struct {
-	Name string
-	Age  int `ini:"age"`
-	Male bool
-	Born time.Time
-	Note
-	Created time.Time `ini:"-"`
-}
-
-func main() {
-	cfg, err := ini.Load("path/to/ini")
-	// ...
-	p := new(Person)
-	err = cfg.MapTo(p)
-	// ...
-
-	// Things can be simpler.
-	err = ini.MapTo(p, "path/to/ini")
-	// ...
-
-	// Just map a section? Fine.
-	n := new(Note)
-	err = cfg.Section("Note").MapTo(n)
-	// ...
-}
-```
-
-Can I have a default value for a field? Absolutely.
-
-Assign it before you map to the struct. The value is kept as-is if the key is not present or has the wrong type.
-
-```go
-// ...
-p := &Person{
-	Name: "Joe",
-}
-// ...
-```
-
-It's really cool, but what's the point if you can't get your file back from the struct?
-
-### Reflect From Struct
-
-Why not?
-
-```go
-type Embeded struct {
-	Dates  []time.Time `delim:"|"`
-	Places []string    `ini:"places,omitempty"`
-	None   []int       `ini:",omitempty"`
-}
-
-type Author struct {
-	Name      string `ini:"NAME"`
-	Male      bool
-	Age       int
-	GPA       float64
-	NeverMind string `ini:"-"`
-	*Embeded
-}
-
-func main() {
-	a := &Author{"Unknwon", true, 21, 2.8, "",
-		&Embeded{
-			[]time.Time{time.Now(), time.Now()},
-			[]string{"HangZhou", "Boston"},
-			[]int{},
-		}}
-	cfg := ini.Empty()
-	err = ini.ReflectFrom(cfg, a)
-	// ...
-}
-```
-
-So, what do I get?
-
-```ini
-NAME = Unknwon
-Male = true
-Age = 21
-GPA = 2.8
-
-[Embeded]
-Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
-places = HangZhou,Boston
-```
-
-#### Name Mapper
-
-To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
-
-There are 2 built-in name mappers:
-
-- `AllCapsUnderscore`: converts names to the `ALL_CAPS_UNDERSCORE` format before matching sections or keys.
-- `TitleUnderscore`: converts names to the `title_underscore` format before matching sections or keys.
-
-To use them:
-
-```go
-type Info struct {
-	PackageName string
-}
-
-func main() {
-	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
-	// ...
-
-	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
-	// ...
-	info := new(Info)
-	cfg.NameMapper = ini.AllCapsUnderscore
-	err = cfg.MapTo(info)
-	// ...
-}
-```
-
-The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
-
-#### Value Mapper
-
-To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
-
-```go
-type Env struct {
-	Foo string `ini:"foo"`
-}
-
-func main() {
-	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
-	cfg.ValueMapper = os.ExpandEnv
-	// ...
-	env := &Env{}
-	err = cfg.Section("env").MapTo(env)
-}
-```
-
-This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
-
-#### Other Notes On Map/Reflect
-
-Any embedded struct is treated as a section by default, and there is no automatic parent-child relation in the map/reflect feature:
-
-```go
-type Child struct {
-	Age string
-}
-
-type Parent struct {
-	Name string
-	Child
-}
-
-type Config struct {
-	City string
-	Parent
-}
-```
-
-Example configuration:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-
-[Child]
-Age = 21
-```
-
-What if, yes, I'm paranoid, I want the embedded struct to be in the same section? Well, all roads lead to Rome.
-
-```go
-type Child struct {
-	Age string
-}
-
-type Parent struct {
-	Name  string
-	Child `ini:"Parent"`
-}
-
-type Config struct {
-	City string
-	Parent
-}
-```
-
-Example configuration:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-Age = 21
-```
-
-## Getting Help
-
-- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
-- [File An Issue](https://github.com/go-ini/ini/issues/new)
-
-## FAQs
-
-### What does the `BlockMode` field do?
-
-By default, the library lets you read and write values, so it needs a lock to make sure your data is safe. But in cases where you are very sure you will only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster (see the first sketch below).
-
-### Why another INI library?
-
-Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted to write more Go-style code. Also, with `cfg.BlockMode = false`, this one is about **10-30%** faster.
-
-To make those changes I had to break the API, so it was safer to start over in another place and begin versioning the package with `gopkg.in`. (PS: the import path is also shorter.)
-
-## License
-
-This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
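To make the `BlockMode` trade-off from the FAQ above concrete, here is a minimal sketch; the `[server]`/`port` data and the read-only workload are invented for illustration, and the import path is the `gopkg.in/ini.v1` one this README documents:

```go
package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	// Load once up front; after this point the config is never written to.
	cfg, err := ini.Load([]byte("[server]\nport = 8080\n"))
	if err != nil {
		panic(err)
	}

	// Safe only because no goroutine mutates cfg from here on:
	// disabling BlockMode skips the internal read/write lock on each access.
	cfg.BlockMode = false

	fmt.Println(cfg.Section("server").Key("port").MustInt(80)) // prints 8080
}
```

The design choice is purely locking overhead vs. safety: leave `BlockMode` at its default of `true` whenever any goroutine may still write to `cfg`.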
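Similarly, the README introduces `LooseLoad`, `InsensitiveLoad`, and `AllowBooleanKeys` separately; since they map to plain fields of `LoadOptions`, they compose in a single `LoadSources` call. Below is a small sketch of that composition under invented data (the `[MySQLd]` snippet and the deliberately missing file name `maybe-missing.cnf` are made up):

```go
package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.LoadSources(ini.LoadOptions{
		Loose:            true, // skip nonexistent files instead of failing
		Insensitive:      true, // lowercase all section and key names while parsing
		AllowBooleanKeys: true, // accept value-less keys such as skip-host-cache
	}, []byte("[MySQLd]\nskip-host-cache\n"), "maybe-missing.cnf")
	if err != nil {
		panic(err)
	}

	// Insensitive loading makes the section addressable in lowercase,
	// and the boolean key reads back as true.
	fmt.Println(cfg.Section("mysqld").Key("skip-host-cache").MustBool()) // true
}
```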
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
deleted file mode 100644
index 3b4fb6604e..0000000000
--- a/vendor/github.com/go-ini/ini/README_ZH.md
+++ /dev/null
@@ -1,721 +0,0 @@
-This package provides INI file read and write functionality for the Go language.
-
-## Features
-
-- Load multiple data sources (`[]byte`, files and `io.ReadCloser`) with overwrites
-- Read recursive values
-- Read parent-child sections
-- Read auto-increment key names
-- Read multi-line values
-- Lots of helper methods
-- Convert values directly to Go types while reading
-- Read and **write** comments of sections and keys
-- Easily manipulate sections, keys and comments
-- Sections and keys keep their original order when saving
-
-## Installation
-
-To use a specific version:
-
-	go get gopkg.in/ini.v1
-
-To use the latest version:
-
-	go get github.com/go-ini/ini
-
-Add the `-u` flag to update in the future.
-
-### Testing the installation
-
-If you want to run the tests on your own machine, use the `-t` flag:
-
-	go get -t gopkg.in/ini.v1
-
-Add the `-u` flag to update in the future.
-
-## Getting Started
-
-### Loading from data sources
-
-A **data source** can be raw data of type `[]byte`, a file path of type `string`, or an `io.ReadCloser`. You can load **any number** of data sources. Passing a data source of any other type returns an error directly.
-
-```go
-cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
-```
-
-Or start from an empty file:
-
-```go
-cfg := ini.Empty()
-```
-
-When you cannot decide at the beginning which data sources to load, you can still use **Append()** to load them when needed.
-
-```go
-err := cfg.Append("other file", []byte("other raw data"))
-```
-
-When you want to load a set of files but cannot be sure which of them might not exist, you can call `LooseLoad` to ignore them (`Load` would return an error for nonexistent files):
-
-```go
-cfg, err := ini.LooseLoad("filename", "filename_404")
-```
-
-Even better, when files that did not exist before suddenly appear on a later call to the `Reload` method, they are loaded normally.
-
-#### Ignoring case in key names
-
-Sometimes mixed-case section and key names are quite annoying; in that case you can use `InsensitiveLoad` to force all section and key names to lowercase while parsing:
-
-```go
-cfg, err := ini.InsensitiveLoad("filename")
-//...
-
-// sec1 and sec2 point to the same section object
-sec1, err := cfg.GetSection("Section")
-sec2, err := cfg.GetSection("SecTIOn")
-
-// key1 and key2 point to the same key object
-key1, err := cfg.GetKey("Key")
-key2, err := cfg.GetKey("KeY")
-```
-
-#### MySQL-like boolean keys
-
-MySQL configuration files contain boolean keys without a concrete value:
-
-```ini
-[mysqld]
-...
-skip-host-cache
-skip-name-resolve
-```
-
-By default this is considered a missing value and cannot be parsed, but it can be handled with advanced load options:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
-```
-
-The value of these keys is always `true`, and only the key name is written out when saving to a file.
-
-#### About comments
-
-The following content is treated as comments:
-
-1. All lines beginning with `#` or `;`
-2. All content after `#` or `;`
-3. Text after a section label (i.e. anything after `[section name]`)
-
-If you want your value to contain `#` or `;`, wrap it with ``` ` ``` or ``` """ ```.
-
-### Working with sections (Section)
-
-To get the specified section:
-
-```go
-section, err := cfg.GetSection("section name")
-```
-
-If you want the default section, use an empty string in place of the section name:
-
-```go
-section, err := cfg.GetSection("")
-```
-
-When you are very sure a section exists, you can use the following shortcut:
-
-```go
-section := cfg.Section("section name")
-```
-
-What happens if you guessed wrong and the section does not actually exist? Don't worry, it automatically creates and returns a corresponding section object for you.
-
-To create a section:
-
-```go
-err := cfg.NewSection("new section")
-```
-
-To get all section objects or names:
-
-```go
-sections := cfg.Sections()
-names := cfg.SectionStrings()
-```
-
-### Working with keys (Key)
-
-To get a key under a section:
-
-```go
-key, err := cfg.Section("").GetKey("key name")
-```
-
-Just like with sections, you can also get a key directly and skip the error handling:
-
-```go
-key := cfg.Section("").Key("key name")
-```
-
-To check whether a key exists:
-
-```go
-yes := cfg.Section("").HasKey("key name")
-```
-
-To create a new key:
-
-```go
-err := cfg.Section("").NewKey("name", "value")
-```
-
-To get all keys or key names under a section:
-
-```go
-keys := cfg.Section("").Keys()
-names := cfg.Section("").KeyStrings()
-```
-
-To get a clone of all key-value pairs under a section:
-
-```go
-hash := cfg.Section("").KeysHash()
-```
-
-### Working with values (Value)
-
-To get a value of type string:
-
-```go
-val := cfg.Section("").Key("key name").String()
-```
-
-To process and validate the value with a custom function while getting it:
-
-```go
-val := cfg.Section("").Key("key name").Validate(func(in string) string {
-	if len(in) == 0 {
-		return "default"
-	}
-	return in
-})
-```
-
-If you don't need any automatic transformation of the value (such as recursive reads), you can get the raw value directly (this gives the best performance):
-
-```go
-val := cfg.Section("").Key("key name").Value()
-```
-
-To check whether a raw value exists:
-
-```go
-yes := cfg.Section("").HasValue("test value")
-```
-
-To get values of other types:
-
-```go
-// Rules for booleans:
-// true when the value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
-// false when the value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
-v, err = cfg.Section("").Key("BOOL").Bool()
-v, err = cfg.Section("").Key("FLOAT64").Float64()
-v, err = cfg.Section("").Key("INT").Int()
-v, err = cfg.Section("").Key("INT64").Int64()
-v, err = cfg.Section("").Key("UINT").Uint()
-v, err = cfg.Section("").Key("UINT64").Uint64()
-v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
-v, err = cfg.Section("").Key("TIME").Time() // RFC3339
-
-v = cfg.Section("").Key("BOOL").MustBool()
-v = cfg.Section("").Key("FLOAT64").MustFloat64()
-v = cfg.Section("").Key("INT").MustInt()
-v = cfg.Section("").Key("INT64").MustInt64()
-v = cfg.Section("").Key("UINT").MustUint()
-v = cfg.Section("").Key("UINT64").MustUint64()
-v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
-v = cfg.Section("").Key("TIME").MustTime() // RFC3339
-
-// Methods starting with Must accept one argument of the same type as a default value;
-// when the key does not exist or conversion fails, that default value is returned directly.
-// However, the MustString method must always be given a default value.
-
-v = cfg.Section("").Key("String").MustString("default")
-v = cfg.Section("").Key("BOOL").MustBool(true)
-v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
-v = cfg.Section("").Key("INT").MustInt(10)
-v = cfg.Section("").Key("INT64").MustInt64(99)
-v = cfg.Section("").Key("UINT").MustUint(3)
-v = cfg.Section("").Key("UINT64").MustUint64(6)
-v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
-v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
-```
-
-What if my value spans several lines?
-
-```ini
-[advance]
-ADDRESS = """404 road,
-NotFound, State, 5000
-Earth"""
-```
-
-Huh? No big deal!
-
-```go
-cfg.Section("advance").Key("ADDRESS").String()
-
-/* --- start ---
-404 road,
-NotFound, State, 5000
-Earth
------- end --- */
-```
-
-Awesome! But what if a value doesn't fit on one line and I want to continue it on the next?
-
-```ini
-[advance]
-two_lines = how about \
-	continuation lines?
-lots_of_lines = 1 \
-	2 \
-	3 \
-	4
-```
-
-Piece of cake!
-
-```go
-cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
-cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
-```
-
-But sometimes I find joined lines really dull; how do I turn off automatic line continuation?
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{
-	IgnoreContinuation: true,
-}, "filename")
-```
-
-Whoa, that's handy!
-
-Note that single or double quotes around a value are stripped automatically:
-
-```ini
-foo = "some value" // foo: some value
-bar = 'some value' // bar: some value
-```
-
-Is that all? Haha, of course not.
-
-#### Helper methods for working with values
-
-To supply candidate values when getting a value:
-
-```go
-v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
-v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
-v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
-v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
-v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9})
-v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9})
-v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
-v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
-```
-
-If the retrieved value is not one of the candidates, the default value is returned; the default value does not need to be one of the candidates.
-
-To check that the retrieved value is within a given range:
-
-```go
-vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
-vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
-vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
-vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
-vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
-vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
-vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
-```
-
-##### Auto-split values into a slice
-
-To substitute the type's zero value for invalid inputs:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
-vals = cfg.Section("").Key("STRINGS").Strings(",")
-vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
-vals = cfg.Section("").Key("INTS").Ints(",")
-vals = cfg.Section("").Key("INT64S").Int64s(",")
-vals = cfg.Section("").Key("UINTS").Uints(",")
-vals = cfg.Section("").Key("UINT64S").Uint64s(",")
-vals = cfg.Section("").Key("TIMES").Times(",")
-```
-
-To exclude invalid inputs from the result slice:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> [2.2]
-vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
-vals = cfg.Section("").Key("INTS").ValidInts(",")
-vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
-vals = cfg.Section("").Key("UINTS").ValidUints(",")
-vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
-vals = cfg.Section("").Key("TIMES").ValidTimes(",")
-```
-
-To return an error as soon as there is an invalid input:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> error
-vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
-vals = cfg.Section("").Key("INTS").StrictInts(",")
-vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
-vals = cfg.Section("").Key("UINTS").StrictUints(",")
-vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
-vals = cfg.Section("").Key("TIMES").StrictTimes(",")
-```
-
-### Saving your configuration
-
-Finally, the moment has come: time to save your configuration.
-
-The most basic approach is to write the configuration to a file:
-
-```go
-// ...
-err = cfg.SaveTo("my.ini")
-err = cfg.SaveToIndent("my.ini", "\t")
-```
-
-A more advanced approach is to write to any object that implements the `io.Writer` interface:
-
-```go
-// ...
-cfg.WriteTo(writer)
-cfg.WriteToIndent(writer, "\t")
-```
-
-By default, spaces are used to align the equals signs between keys and values for prettier output; the following disables that behavior:
-
-```go
-ini.PrettyFormat = false
-```
-
-## Advanced Usage
-
-### Recursive values
-
-While key values are being resolved, the special syntax `%(<name>)s` is applied, where `<name>` can be a key name in the same section or in the default section. The string `%(<name>)s` is replaced by the corresponding value, or by an empty string if the specified key does not exist. You can nest this recursion up to 99 levels deep.
-
-```ini
-NAME = ini
-
-[author]
-NAME = Unknwon
-GITHUB = https://github.com/%(NAME)s
-
-[package]
-FULL_NAME = github.com/go-ini/%(NAME)s
-```
-
-```go
-cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
-cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
-```
-
-### Parent-child sections
-
-You can use `.` in section names to express a parent-child relationship between two or more sections. If a key does not exist in a child section, the lookup is retried in its parent section, continuing upward until there is no parent section.
-
-```ini
-NAME = ini
-VERSION = v1
-IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
-
-[package]
-CLONE_URL = https://%(IMPORT_PATH)s
-
-[package.sub]
-```
-
-```go
-cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
-```
-
-#### Getting all parent keys available to a child section
-
-```go
-cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
-```
-
-### Unparseable sections
-
-Some special sections contain no ordinary key-value pairs, only free-form plain text; these can be handled with `LoadOptions.UnparseableSections`:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
-<1> This slide has the fuel listed in the wrong units `)
-
-body := cfg.Section("COMMENTS").Body()
-
-/* --- start ---
-<1> This slide has the fuel listed in the wrong units
------- end --- */
-```
-
-### Auto-increment key names
-
-If a key name in the data source is `-`, it is treated as the special syntax for auto-increment key names. The counter starts at 1, and each section keeps its own independent counter.
-
-```ini
-[features]
--: Support read/write comments of keys and sections
--: Support auto-increment of key names
--: Support load multiple files to overwrite key values
-```
-
-```go
-cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
-```
-
-### Mapping to a struct
-
-Want to play with INI in a more object-oriented way? Good idea.
-
-```ini
-Name = Unknwon
-age = 21
-Male = true
-Born = 1993-01-01T20:17:05Z
-
-[Note]
-Content = Hi is a good man!
-Cities = HangZhou, Boston
-```
-
-```go
-type Note struct {
-	Content string
-	Cities  []string
-}
-
-type Person struct {
-	Name string
-	Age  int `ini:"age"`
-	Male bool
-	Born time.Time
-	Note
-	Created time.Time `ini:"-"`
-}
-
-func main() {
-	cfg, err := ini.Load("path/to/ini")
-	// ...
-	p := new(Person)
-	err = cfg.MapTo(p)
-	// ...
-
-	// It can be even simpler.
-	err = ini.MapTo(p, "path/to/ini")
-	// ...
-
-	// Hm? You only need to map one section?
-	n := new(Note)
-	err = cfg.Section("Note").MapTo(n)
-	// ...
-}
-```
-
-How do you set default values for struct fields? It's easy: just assign a value to the field before mapping. If the key is not found or has the wrong type, the value is left unchanged.
-
-```go
-// ...
-p := &Person{
-	Name: "Joe",
-}
-// ...
-```
-
-Playing with INI like this is really cool! But what's the point if you can't give me back my original configuration file?
-
-### Reflecting from a struct
-
-Did I say you couldn't?
-
-```go
-type Embeded struct {
-	Dates  []time.Time `delim:"|"`
-	Places []string    `ini:"places,omitempty"`
-	None   []int       `ini:",omitempty"`
-}
-
-type Author struct {
-	Name      string `ini:"NAME"`
-	Male      bool
-	Age       int
-	GPA       float64
-	NeverMind string `ini:"-"`
-	*Embeded
-}
-
-func main() {
-	a := &Author{"Unknwon", true, 21, 2.8, "",
-		&Embeded{
-			[]time.Time{time.Now(), time.Now()},
-			[]string{"HangZhou", "Boston"},
-			[]int{},
-		}}
-	cfg := ini.Empty()
-	err = ini.ReflectFrom(cfg, a)
-	// ...
-}
-```
-
-Look at that, a miracle happened.
-
-```ini
-NAME = Unknwon
-Male = true
-Age = 21
-GPA = 2.8
-
-[Embeded]
-Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
-places = HangZhou,Boston
-```
-
-#### Name mappers (Name Mapper)
-
-To save you time and keep your code clean, this library supports name mappers of type [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which handle the mapping between struct field names and section/key names.
-
-There are currently 2 built-in mappers:
-
-- `AllCapsUnderscore`: converts field names to the `ALL_CAPS_UNDERSCORE` format before matching section and key names.
-- `TitleUnderscore`: converts field names to the `title_underscore` format before matching section and key names.
-
-Usage:
-
-```go
-type Info struct {
-	PackageName string
-}
-
-func main() {
-	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
-	// ...
-
-	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
-	// ...
-	info := new(Info)
-	cfg.NameMapper = ini.AllCapsUnderscore
-	err = cfg.MapTo(info)
-	// ...
-}
-```
-
-The same rules apply when using the `ini.ReflectFromWithMapper` function.
-
-#### Value mappers (Value Mapper)
-
-A value mapper lets you expand the content of a value automatically with a custom function, e.g. reading environment variables at runtime:
-
-```go
-type Env struct {
-	Foo string `ini:"foo"`
-}
-
-func main() {
-	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
-	cfg.ValueMapper = os.ExpandEnv
-	// ...
-	env := &Env{}
-	err = cfg.Section("env").MapTo(env)
-}
-```
-
-In this example, `env.Foo` becomes the value of the environment variable `MY_VAR` at runtime.
-
-#### Other notes on map/reflect
-
-Any embedded struct is treated as a separate section by default, and no parent-child section relation is created automatically:
-
-```go
-type Child struct {
-	Age string
-}
-
-type Parent struct {
-	Name string
-	Child
-}
-
-type Config struct {
-	City string
-	Parent
-}
-```
-
-Example configuration file:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-
-[Child]
-Age = 21
-```
-
-Fine, but I insist that the embedded struct live in the same section. All right, have it your way!
-
-```go
-type Child struct {
-	Age string
-}
-
-type Parent struct {
-	Name  string
-	Child `ini:"Parent"`
-}
-
-type Config struct {
-	City string
-	Parent
-}
-```
-
-Example configuration file:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-Age = 21
-```
-
-## Getting help
-
-- [API documentation](https://gowalker.org/gopkg.in/ini.v1)
-- [File an issue](https://github.com/go-ini/ini/issues/new)
-
-## FAQs
-
-### What does the `BlockMode` field do?
-
-By default, this library uses a lock to keep your data safe when you read and write values. But in cases where you are certain you will only read data, you can set `cfg.BlockMode = false` to make read operations roughly **50-70%** faster.
-
-### Why write another INI parsing library?
-
-Many people use my [goconfig](https://github.com/Unknwon/goconfig) library to work with INI files, but I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this library is roughly **10-30%** faster.
-
-Making those changes meant breaking the API, so starting a new repository was the safest approach. Besides, this library versions its releases directly via `gopkg.in`. (Truth be told, the import path is also shorter.)
diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go
deleted file mode 100644
index 80afe74315..0000000000
--- a/vendor/github.com/go-ini/ini/error.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
-	"fmt"
-)
-
-// ErrDelimiterNotFound is returned when parsing fails to find any
-// key-value delimiter on a line.
-type ErrDelimiterNotFound struct {
-	Line string
-}
-
-// IsErrDelimiterNotFound reports whether err is of type ErrDelimiterNotFound.
-func IsErrDelimiterNotFound(err error) bool {
-	_, ok := err.(ErrDelimiterNotFound)
-	return ok
-}
-
-func (err ErrDelimiterNotFound) Error() string {
-	return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
-}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
deleted file mode 100644
index 77e0dbde64..0000000000
--- a/vendor/github.com/go-ini/ini/ini.go
+++ /dev/null
@@ -1,535 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-// Package ini provides INI file read and write functionality in Go.
-package ini
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"regexp"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-)
-
-const (
-	// Name for default section. You can use this constant or the string literal.
-	// In most cases, an empty string is all you need to access the section.
-	DEFAULT_SECTION = "DEFAULT"
-
-	// Maximum allowed depth when recursively substituting variable names.
-	_DEPTH_VALUES = 99
-	_VERSION      = "1.23.1"
-)
-
-// Version returns the current package version literal.
-func Version() string {
-	return _VERSION
-}
-
-var (
-	// Delimiter to determine or compose a new line.
-	// This variable will be changed to "\r\n" automatically on Windows
-	// at package init time.
-	LineBreak = "\n"
-
-	// Variable regexp pattern: %(variable)s
-	varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
-
-	// Indicate whether to align "=" sign with spaces to produce pretty output
-	// or reduce all possible spaces for compact format.
-	PrettyFormat = true
-
-	// Explicitly write DEFAULT section header.
-	DefaultHeader = false
-)
-
-func init() {
-	if runtime.GOOS == "windows" {
-		LineBreak = "\r\n"
-	}
-}
-
-func inSlice(str string, s []string) bool {
-	for _, v := range s {
-		if str == v {
-			return true
-		}
-	}
-	return false
-}
-
-// dataSource is an interface that returns an object which can be read and closed.
-type dataSource interface {
-	ReadCloser() (io.ReadCloser, error)
-}
-
-// sourceFile represents an object that contains content on the local file system.
-type sourceFile struct {
-	name string
-}
-
-func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
-	return os.Open(s.name)
-}
-
-type bytesReadCloser struct {
-	reader io.Reader
-}
-
-func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
-	return rc.reader.Read(p)
-}
-
-func (rc *bytesReadCloser) Close() error {
-	return nil
-}
-
-// sourceData represents an object that contains content in memory.
-type sourceData struct {
-	data []byte
-}
-
-func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
-	return ioutil.NopCloser(bytes.NewReader(s.data)), nil
-}
-
-// sourceReadCloser represents an input stream with a Close method.
-type sourceReadCloser struct {
-	reader io.ReadCloser
-}
-
-func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
-	return s.reader, nil
-}
-
-// File represents a combination of one or more INI files in memory.
-type File struct {
-	// Should make things safe, but sometimes doesn't matter.
-	BlockMode bool
-	// Make sure data is safe in multiple goroutines.
-	lock sync.RWMutex
-
-	// Allow combination of multiple data sources.
-	dataSources []dataSource
-	// Actual data is stored here.
-	sections map[string]*Section
-
-	// To keep data in order.
-	sectionList []string
-
-	options LoadOptions
-
-	NameMapper
-	ValueMapper
-}
-
-// newFile initializes a File object with the given data sources.
-func newFile(dataSources []dataSource, opts LoadOptions) *File {
-	return &File{
-		BlockMode:   true,
-		dataSources: dataSources,
-		sections:    make(map[string]*Section),
-		sectionList: make([]string, 0, 10),
-		options:     opts,
-	}
-}
-
-func parseDataSource(source interface{}) (dataSource, error) {
-	switch s := source.(type) {
-	case string:
-		return sourceFile{s}, nil
-	case []byte:
-		return &sourceData{s}, nil
-	case io.ReadCloser:
-		return &sourceReadCloser{s}, nil
-	default:
-		return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
-	}
-}
-
-// LoadOptions contains all customized options for loading data sources.
-type LoadOptions struct {
-	// Loose indicates whether the parser should ignore nonexistent files or return an error.
-	Loose bool
-	// Insensitive indicates whether the parser forces all section and key names to lowercase.
-	Insensitive bool
-	// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
-	IgnoreContinuation bool
-	// AllowBooleanKeys indicates whether to allow boolean-type keys or to treat them
-	// as keys with missing values. Keys of this type are mostly used in my.cnf.
-	AllowBooleanKeys bool
-	// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
-	// conform to key/value pairs. Specify the names of those blocks here.
-	UnparseableSections []string
-}
-
-// LoadSources allows the caller to apply customized options for loading from data source(s).
-func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
-	sources := make([]dataSource, len(others)+1)
-	sources[0], err = parseDataSource(source)
-	if err != nil {
-		return nil, err
-	}
-	for i := range others {
-		sources[i+1], err = parseDataSource(others[i])
-		if err != nil {
-			return nil, err
-		}
-	}
-	f := newFile(sources, opts)
-	if err = f.Reload(); err != nil {
-		return nil, err
-	}
-	return f, nil
-}
-
-// Load loads and parses INI data sources.
-// Arguments can be a mix of file names (string) and raw data ([]byte).
-// It returns an error if the list contains nonexistent files.
-func Load(source interface{}, others ...interface{}) (*File, error) {
-	return LoadSources(LoadOptions{}, source, others...)
-}
-
-// LooseLoad has exactly the same functionality as Load,
-// except it ignores nonexistent files instead of returning an error.
-func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
-	return LoadSources(LoadOptions{Loose: true}, source, others...)
-}
-
-// InsensitiveLoad has exactly the same functionality as Load,
-// except it forces all section and key names to lowercase.
-func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
-	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
-}
-
-// Empty returns an empty file object.
-func Empty() *File {
-	// Ignore error here; we're sure our data is good.
-	f, _ := Load([]byte(""))
-	return f
-}
-
-// NewSection creates a new section.
-func (f *File) NewSection(name string) (*Section, error) {
-	if len(name) == 0 {
-		return nil, errors.New("error creating new section: empty section name")
-	} else if f.options.Insensitive && name != DEFAULT_SECTION {
-		name = strings.ToLower(name)
-	}
-
-	if f.BlockMode {
-		f.lock.Lock()
-		defer f.lock.Unlock()
-	}
-
-	if inSlice(name, f.sectionList) {
-		return f.sections[name], nil
-	}
-
-	f.sectionList = append(f.sectionList, name)
-	f.sections[name] = newSection(f, name)
-	return f.sections[name], nil
-}
-
-// NewRawSection creates a new section with an unparseable body.
-func (f *File) NewRawSection(name, body string) (*Section, error) {
-	section, err := f.NewSection(name)
-	if err != nil {
-		return nil, err
-	}
-
-	section.isRawSection = true
-	section.rawBody = body
-	return section, nil
-}
-
-// NewSections creates a list of sections.
-func (f *File) NewSections(names ...string) (err error) {
-	for _, name := range names {
-		if _, err = f.NewSection(name); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// GetSection returns the section with the given name.
-func (f *File) GetSection(name string) (*Section, error) {
-	if len(name) == 0 {
-		name = DEFAULT_SECTION
-	} else if f.options.Insensitive {
-		name = strings.ToLower(name)
-	}
-
-	if f.BlockMode {
-		f.lock.RLock()
-		defer f.lock.RUnlock()
-	}
-
-	sec := f.sections[name]
-	if sec == nil {
-		return nil, fmt.Errorf("section '%s' does not exist", name)
-	}
-	return sec, nil
-}
-
-// Section returns the named section; if it does not exist,
-// a new section is created and returned instead of an error.
-func (f *File) Section(name string) *Section {
-	sec, err := f.GetSection(name)
-	if err != nil {
-		// Note: It's OK here because the only possible error is an empty section name,
-		// but if it's empty, this piece of code won't be executed.
-		sec, _ = f.NewSection(name)
-		return sec
-	}
-	return sec
-}
-
-// Sections returns the list of sections.
-func (f *File) Sections() []*Section {
-	sections := make([]*Section, len(f.sectionList))
-	for i := range f.sectionList {
-		sections[i] = f.Section(f.sectionList[i])
-	}
-	return sections
-}
-
-// SectionStrings returns the list of section names.
-func (f *File) SectionStrings() []string {
-	list := make([]string, len(f.sectionList))
-	copy(list, f.sectionList)
-	return list
-}
-
-// DeleteSection deletes a section.
-func (f *File) DeleteSection(name string) {
-	if f.BlockMode {
-		f.lock.Lock()
-		defer f.lock.Unlock()
-	}
-
-	if len(name) == 0 {
-		name = DEFAULT_SECTION
-	}
-
-	for i, s := range f.sectionList {
-		if s == name {
-			f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
-			delete(f.sections, name)
-			return
-		}
-	}
-}
-
-func (f *File) reload(s dataSource) error {
-	r, err := s.ReadCloser()
-	if err != nil {
-		return err
-	}
-	defer r.Close()
-
-	return f.parse(r)
-}
-
-// Reload reloads and parses all data sources.
-func (f *File) Reload() (err error) {
-	for _, s := range f.dataSources {
-		if err = f.reload(s); err != nil {
-			// In loose mode, we create an empty default section for nonexistent files.
-			if os.IsNotExist(err) && f.options.Loose {
-				f.parse(bytes.NewBuffer(nil))
-				continue
-			}
-			return err
-		}
-	}
-	return nil
-}
-
-// Append appends one or more data sources and reloads automatically.
-func (f *File) Append(source interface{}, others ...interface{}) error {
-	ds, err := parseDataSource(source)
-	if err != nil {
-		return err
-	}
-	f.dataSources = append(f.dataSources, ds)
-	for _, s := range others {
-		ds, err = parseDataSource(s)
-		if err != nil {
-			return err
-		}
-		f.dataSources = append(f.dataSources, ds)
-	}
-	return f.Reload()
-}
-
-// WriteToIndent writes content into io.Writer with the given indentation.
-// If PrettyFormat has been set to true,
-// it will align the "=" sign with spaces under each section.
-func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
-	equalSign := "="
-	if PrettyFormat {
-		equalSign = " = "
-	}
-
-	// Use buffer to make sure target is safe until finish encoding.
-	buf := bytes.NewBuffer(nil)
-	for i, sname := range f.sectionList {
-		sec := f.Section(sname)
-		if len(sec.Comment) > 0 {
-			if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
-				sec.Comment = "; " + sec.Comment
-			}
-			if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
-				return 0, err
-			}
-		}
-
-		if i > 0 || DefaultHeader {
-			if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
-				return 0, err
-			}
-		} else {
-			// Write nothing if default section is empty
-			if len(sec.keyList) == 0 {
-				continue
-			}
-		}
-
-		if sec.isRawSection {
-			if _, err = buf.WriteString(sec.rawBody); err != nil {
-				return 0, err
-			}
-			continue
-		}
-
-		// Count and generate alignment length and buffer spaces using the
-		// longest key. Keys may be modified if they contain certain characters so
-		// we need to take that into account in our calculation.
-		alignLength := 0
-		if PrettyFormat {
-			for _, kname := range sec.keyList {
-				keyLength := len(kname)
-				// First case will surround key by ` and second by """
-				if strings.ContainsAny(kname, "\"=:") {
-					keyLength += 2
-				} else if strings.Contains(kname, "`") {
-					keyLength += 6
-				}
-
-				if keyLength > alignLength {
-					alignLength = keyLength
-				}
-			}
-		}
-		alignSpaces := bytes.Repeat([]byte(" "), alignLength)
-
-		for _, kname := range sec.keyList {
-			key := sec.Key(kname)
-			if len(key.Comment) > 0 {
-				if len(indent) > 0 && sname != DEFAULT_SECTION {
-					buf.WriteString(indent)
-				}
-				if key.Comment[0] != '#' && key.Comment[0] != ';' {
-					key.Comment = "; " + key.Comment
-				}
-				if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
-					return 0, err
-				}
-			}
-
-			if len(indent) > 0 && sname != DEFAULT_SECTION {
-				buf.WriteString(indent)
-			}
-
-			switch {
-			case key.isAutoIncrement:
-				kname = "-"
-			case strings.ContainsAny(kname, "\"=:"):
-				kname = "`" + kname + "`"
-			case strings.Contains(kname, "`"):
-				kname = `"""` + kname + `"""`
-			}
-			if _, err = buf.WriteString(kname); err != nil {
-				return 0, err
-			}
-
-			if key.isBooleanType {
-				continue
-			}
-
-			// Write out alignment spaces before "=" sign
-			if PrettyFormat {
-				buf.Write(alignSpaces[:alignLength-len(kname)])
-			}
-
-			val := key.value
-			// In case key value contains "\n", "`", "\"", "#" or ";"
-			if strings.ContainsAny(val, "\n`") {
-				val = `"""` + val + `"""`
-			} else if strings.ContainsAny(val, "#;") {
-				val = "`" + val + "`"
-			}
-			if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
-				return 0, err
-			}
-		}
-
-		// Put a line between sections
-		if _, err = buf.WriteString(LineBreak); err != nil {
-			return 0, err
-		}
-	}
-
-	return buf.WriteTo(w)
-}
-
-// WriteTo writes file content into io.Writer.
-func (f *File) WriteTo(w io.Writer) (int64, error) {
-	return f.WriteToIndent(w, "")
-}
-
-// SaveToIndent writes content to the file system with the given value indentation.
-func (f *File) SaveToIndent(filename, indent string) error {
-	// Note: Because os.Create truncates the target file, it's safer to save
-	// to a temporary file location and rename it after we're done.
-	tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
-	defer os.Remove(tmpPath)
-
-	fw, err := os.Create(tmpPath)
-	if err != nil {
-		return err
-	}
-
-	if _, err = f.WriteToIndent(fw, indent); err != nil {
-		fw.Close()
-		return err
-	}
-	fw.Close()
-
-	// Remove old file and rename the new one.
-	os.Remove(filename)
-	return os.Rename(tmpPath, filename)
-}
-
-// SaveTo writes content to the file system.
-func (f *File) SaveTo(filename string) error {
-	return f.SaveToIndent(filename, "")
-}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
deleted file mode 100644
index 9738c55a21..0000000000
--- a/vendor/github.com/go-ini/ini/key.go
+++ /dev/null
@@ -1,633 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// Key represents a key under a section.
-type Key struct {
-	s               *Section
-	name            string
-	value           string
-	isAutoIncrement bool
-	isBooleanType   bool
-
-	Comment string
-}
-
-// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
-type ValueMapper func(string) string
-
-// Name returns the name of the key.
-func (k *Key) Name() string {
-	return k.name
-}
-
-// Value returns the raw value of the key for performance purposes.
-func (k *Key) Value() string {
-	return k.value
-}
-
-// String returns the string representation of the value.
-func (k *Key) String() string {
-	val := k.value
-	if k.s.f.ValueMapper != nil {
-		val = k.s.f.ValueMapper(val)
-	}
-	if strings.Index(val, "%") == -1 {
-		return val
-	}
-
-	for i := 0; i < _DEPTH_VALUES; i++ {
-		vr := varPattern.FindString(val)
-		if len(vr) == 0 {
-			break
-		}
-
-		// Take off leading '%(' and trailing ')s'.
-		noption := strings.TrimLeft(vr, "%(")
-		noption = strings.TrimRight(noption, ")s")
-
-		// Search in the same section.
-		nk, err := k.s.GetKey(noption)
-		if err != nil {
-			// Search again in the default section.
-			nk, _ = k.s.f.Section("").GetKey(noption)
-		}
-
-		// Substitute by the new value.
-		val = strings.Replace(val, vr, nk.value, -1)
-	}
-	return val
-}
-
-// Validate accepts a validation function that can
-// return a modified result as the key value.
-func (k *Key) Validate(fn func(string) string) string {
-	return fn(k.String())
-}
-
-// parseBool returns the boolean value represented by the string.
-//
-// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
-// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
-// Any other value returns an error.
-func parseBool(str string) (value bool, err error) {
-	switch str {
-	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
-		return true, nil
-	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
-		return false, nil
-	}
-	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
-}
-
-// Bool returns bool type value.
-func (k *Key) Bool() (bool, error) {
-	return parseBool(k.String())
-}
-
-// Float64 returns float64 type value.
-func (k *Key) Float64() (float64, error) {
-	return strconv.ParseFloat(k.String(), 64)
-}
-
-// Int returns int type value.
-func (k *Key) Int() (int, error) {
-	return strconv.Atoi(k.String())
-}
-
-// Int64 returns int64 type value.
-func (k *Key) Int64() (int64, error) {
-	return strconv.ParseInt(k.String(), 10, 64)
-}
-
-// Uint returns uint type value.
-func (k *Key) Uint() (uint, error) {
-	u, e := strconv.ParseUint(k.String(), 10, 64)
-	return uint(u), e
-}
-
-// Uint64 returns uint64 type value.
-func (k *Key) Uint64() (uint64, error) {
-	return strconv.ParseUint(k.String(), 10, 64)
-}
-
-// Duration returns time.Duration type value.
-func (k *Key) Duration() (time.Duration, error) {
-	return time.ParseDuration(k.String())
-}
-
-// TimeFormat parses with given format and returns time.Time type value.
-func (k *Key) TimeFormat(format string) (time.Time, error) {
-	return time.Parse(format, k.String())
-}
-
-// Time parses with RFC3339 format and returns time.Time type value.
-func (k *Key) Time() (time.Time, error) {
-	return k.TimeFormat(time.RFC3339)
-}
-
-// MustString returns default value if key value is empty.
-func (k *Key) MustString(defaultVal string) string {
-	val := k.String()
-	if len(val) == 0 {
-		k.value = defaultVal
-		return defaultVal
-	}
-	return val
-}
-
-// MustBool always returns value without error,
-// it returns false if error occurs.
-func (k *Key) MustBool(defaultVal ...bool) bool {
-	val, err := k.Bool()
-	if len(defaultVal) > 0 && err != nil {
-		k.value = strconv.FormatBool(defaultVal[0])
-		return defaultVal[0]
-	}
-	return val
-}
-
-// MustFloat64 always returns value without error,
-// it returns 0.0 if error occurs.
-func (k *Key) MustFloat64(defaultVal ...float64) float64 {
-	val, err := k.Float64()
-	if len(defaultVal) > 0 && err != nil {
-		k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
-		return defaultVal[0]
-	}
-	return val
-}
-
-// MustInt always returns value without error,
-// it returns 0 if error occurs.
-func (k *Key) MustInt(defaultVal ...int) int {
-	val, err := k.Int()
-	if len(defaultVal) > 0 && err != nil {
-		k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
-		return defaultVal[0]
-	}
-	return val
-}
-
-// MustInt64 always returns value without error,
-// it returns 0 if error occurs.
-func (k *Key) MustInt64(defaultVal ...int64) int64 {
-	val, err := k.Int64()
-	if len(defaultVal) > 0 && err != nil {
-		k.value = strconv.FormatInt(defaultVal[0], 10)
-		return defaultVal[0]
-	}
-	return val
-}
-
-// MustUint always returns value without error,
-// it returns 0 if error occurs.
-func (k *Key) MustUint(defaultVal ...uint) uint {
-	val, err := k.Uint()
-	if len(defaultVal) > 0 && err != nil {
-		k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
-		return defaultVal[0]
-	}
-	return val
-}
-
-// MustUint64 always returns value without error,
-// it returns 0 if error occurs.
-func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - vals := strings.Split(str, delim) - for i := range vals { - vals[i] = strings.TrimSpace(vals[i]) - } - return vals -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.getFloat64s(delim, true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.getInts(delim, true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.getInt64s(delim, true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.getUints(delim, true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.getUint64s(delim, true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
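The In*/Range* guards above never return an error; out-of-set or out-of-range values silently fall back to the supplied default. A small sketch in the same style (sec stands for any *ini.Section):

    func readLimits(sec *ini.Section) (level string, workers int) {
        // In returns the default when the value is not among the candidates.
        level = sec.Key("level").In("info", []string{"debug", "info", "warn", "error"})
        // RangeInt returns the default when the parsed value falls outside [min, max].
        workers = sec.Key("workers").RangeInt(4, 1, 64)
        return level, workers
    }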
-func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.getTimesFormat(format, delim, true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.getFloat64s(delim, false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.getInts(delim, false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.getInt64s(delim, false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.getUints(delim, false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.getUint64s(delim, false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.getTimesFormat(format, delim, false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.getFloat64s(delim, false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.getInts(delim, false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.getInt64s(delim, false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. -func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.getUints(delim, false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.getUint64s(delim, false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. 
-func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.getTimesFormat(format, delim, false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// getFloat64s returns list of float64 divided by given delimiter. -func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) { - strs := k.Strings(delim) - vals := make([]float64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseFloat(str, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// getInts returns list of int divided by given delimiter. -func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) { - strs := k.Strings(delim) - vals := make([]int, 0, len(strs)) - for _, str := range strs { - val, err := strconv.Atoi(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// getInt64s returns list of int64 divided by given delimiter. -func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) { - strs := k.Strings(delim) - vals := make([]int64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// getUints returns list of uint divided by given delimiter. -func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) { - strs := k.Strings(delim) - vals := make([]uint, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 0) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, uint(val)) - } - } - return vals, nil -} - -// getUint64s returns list of uint64 divided by given delimiter. -func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - strs := k.Strings(delim) - vals := make([]uint64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - strs := k.Strings(delim) - vals := make([]time.Time, 0, len(strs)) - for _, str := range strs { - val, err := time.Parse(format, str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. 
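The three list flavors differ only in the (addInvalid, returnOnInvalid) flags threaded into the get* helpers above; for a key holding "80, x, 443" the behaviors diverge as sketched here:

    func listModes(sec *ini.Section) {
        k := sec.Key("ports")       // assume the raw value is "80, x, 443"
        a := k.Ints(",")            // [80 0 443]: invalid entries become zero values
        b := k.ValidInts(",")       // [80 443]:   invalid entries are dropped
        _, err := k.StrictInts(",") // nil, err:   the first invalid entry aborts
        _, _, _ = a, b, err
    }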
-func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index b0aabe33b1..0000000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - "unicode" -) - -type tokenType int - -const ( - _TOKEN_INVALID tokenType = iota - _TOKEN_COMMENT - _TOKEN_SECTION - _TOKEN_KEY -) - -type parser struct { - buf *bufio.Reader - isEOF bool - count int - comment *bytes.Buffer -} - -func newParser(r io.Reader) *parser { - return &parser{ - buf: bufio.NewReader(r), - count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - p.buf.Read(mask) - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - p.buf.Read(mask) - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. 
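The BOM probe above recognizes three signatures. A simplified standalone equivalent, not the package API, that collapses the two Peek calls into one (the decimal constants 254/255 and 239/187/191 in the original are 0xFE/0xFF and 0xEF 0xBB 0xBF):

    func skipBOM(buf *bufio.Reader) error {
        mask, err := buf.Peek(3)
        if err != nil && err != io.EOF {
            return err
        }
        switch {
        case len(mask) >= 2 && (mask[0] == 0xFE && mask[1] == 0xFF ||
            mask[0] == 0xFF && mask[1] == 0xFE):
            buf.Discard(2) // UTF-16 BE (FE FF) or LE (FF FE)
        case len(mask) >= 3 && mask[0] == 0xEF && mask[1] == 0xBB && mask[2] == 0xBF:
            buf.Discard(3) // UTF-8 (EF BB BF)
        }
        return nil
    }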
- var keyQuote string - if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - endIdx := -1 - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], "=:") - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, "=:") - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. -// It returns false if any other parts also contain same kind of quotes. -func hasSurroundedQuote(in string, quote byte) bool { - return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) { - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - return "", nil - } - - var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - return line[startIdx : pos+startIdx], nil - } - - // Won't be able to reach here if value only contains whitespace. - line = strings.TrimSpace(line) - - // Check continuation lines when desired. - if !ignoreContinuation && line[len(line)-1] == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - i := strings.IndexAny(line, "#;") - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - - // Trim single quotes - if hasSurroundedQuote(line, '\'') || - hasSurroundedQuote(line, '"') { - line = line[1 : len(line)-1] - } - return line, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. 
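hasSurroundedQuote above is deliberately strict: the quote byte may occur only as the very first and very last character. An in-package illustration of the three cases:

    func quoteExamples() []bool {
        return []bool{
            hasSurroundedQuote(`"name"`, '"'), // true
            hasSurroundedQuote(`"a"b"`, '"'),  // false: the quote also appears inside
            hasSurroundedQuote(`""`, '"'),     // false: the length must exceed 2
        }
    }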
- section, _ := f.NewSection(DEFAULT_SECTION) - - var line []byte - var inUnparseableSection bool - for !p.isEOF { - line, err = p.readUntil('\n') - if err != nil { - return err - } - - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - if len(line) == 0 { - continue - } - - // Comments - if line[0] == '#' || line[0] == ';' { - // Note: we do not care ending line break, - // it is needed for adding second line, - // so just clean it once at the end when set to value. - p.comment.Write(line) - continue - } - - // Section - if line[0] == '[' { - // Read to the next ']' (TODO: support quoted strings) - // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 - closeIdx := bytes.LastIndex(line, []byte("]")) - if closeIdx == -1 { - return fmt.Errorf("unclosed section: %s", line) - } - - name := string(line[1:closeIdx]) - section, err = f.NewSection(name) - if err != nil { - return err - } - - comment, has := cleanComment(line[closeIdx+1:]) - if has { - p.comment.Write(comment) - } - - section.Comment = strings.TrimSpace(p.comment.String()) - - // Reset aotu-counter and comments - p.comment.Reset() - p.count = 1 - - inUnparseableSection = false - for i := range f.options.UnparseableSections { - if f.options.UnparseableSections[i] == name || - (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { - inUnparseableSection = true - continue - } - } - continue - } - - if inUnparseableSection { - section.isRawSection = true - section.rawBody += string(line) - continue - } - - kname, offset, err := readKeyName(line) - if err != nil { - // Treat as boolean key when desired, and whole line is key name. - if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { - key, err := section.NewKey(string(line), "true") - if err != nil { - return err - } - key.isBooleanType = true - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue - } - return err - } - - // Auto increment. - isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - key, err := section.NewKey(kname, "") - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - - value, err := p.readValue(line[offset:], f.options.IgnoreContinuation) - if err != nil { - return err - } - key.SetValue(value) - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - } - return nil -} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go deleted file mode 100644 index 45d2f3bfd0..0000000000 --- a/vendor/github.com/go-ini/ini/section.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. 
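The parse loop above wires together several LoadOptions. A sketch using LoadSources, the package's option-taking entry point, exercising boolean keys, auto-increment "-" keys, and unparseable sections (data is contrived for illustration):

    func parseFeatures() {
        data := []byte("[features]\nenabled\n- = first\n- = second\n\n[raw]\n<not-ini/>\n")
        f, err := ini.LoadSources(ini.LoadOptions{
            AllowBooleanKeys:    true,            // bare "enabled" parses as enabled=true
            UnparseableSections: []string{"raw"}, // [raw] body kept verbatim (Section.Body, below)
        }, data)
        if err != nil {
            log.Fatal(err)
        }
        // "-" key names auto-increment and are stored as #1, #2, ...
        fmt.Println(f.Section("features").Key("#1").String()) // "first"
    }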
-type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - s.keys[name].value = val - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = &Key{ - s: s, - name: name, - value: val, - } - s.keysHash[name] = val - return s.keys[name], nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Haskey is a backwards-compatible name for HasKey. -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) 
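GetKey's parent walk above gives dotted section names a simple inheritance scheme; a minimal sketch:

    func childLookup() {
        data := []byte("[parent]\nkey = value\n\n[parent.child]\n")
        f, _ := ini.Load(data)
        // "key" is missing from [parent.child], so GetKey walks up to [parent].
        fmt.Println(f.Section("parent.child").Key("key").String()) // "value"
    }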
- } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } -} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go deleted file mode 100644 index 5ef38d865f..0000000000 --- a/vendor/github.com/go-ini/ini/struct.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "time" - "unicode" -) - -// NameMapper represents a ini tag name mapper. -type NameMapper func(string) string - -// Built-in name getters. -var ( - // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - } - newstr = append(newstr, unicode.ToUpper(chr)) - } - return string(newstr) - } - // TitleUnderscore converts to format title_underscore. - TitleUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - chr -= ('A' - 'a') - } - newstr = append(newstr, chr) - } - return string(newstr) - } -) - -func (s *Section) parseFieldName(raw, actual string) string { - if len(actual) > 0 { - return actual - } - if s.f.NameMapper != nil { - return s.f.NameMapper(raw) - } - return raw -} - -func parseDelim(actual string) string { - if len(actual) > 0 { - return actual - } - return "," -} - -var reflectTime = reflect.TypeOf(time.Now()).Kind() - -// setSliceWithProperType sets proper values to slice based on its type. 
-func setSliceWithProperType(key *Key, field reflect.Value, delim string) error { - strs := key.Strings(delim) - numVals := len(strs) - if numVals == 0 { - return nil - } - - var vals interface{} - - sliceOf := field.Type().Elem().Kind() - switch sliceOf { - case reflect.String: - vals = strs - case reflect.Int: - vals = key.Ints(delim) - case reflect.Int64: - vals = key.Int64s(delim) - case reflect.Uint: - vals = key.Uints(delim) - case reflect.Uint64: - vals = key.Uint64s(delim) - case reflect.Float64: - vals = key.Float64s(delim) - case reflectTime: - vals = key.Times(delim) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflect.String: - slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) - case reflect.Int: - slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) - case reflect.Int64: - slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) - case reflect.Uint: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) - case reflect.Uint64: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) - case reflect.Float64: - slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) - } - } - field.Set(slice) - return nil -} - -// setWithProperType sets proper value to field based on its type, -// but it does not return error for failing parsing, -// because we want to use default value that is already assigned to strcut. -func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { - switch t.Kind() { - case reflect.String: - if len(key.String()) == 0 { - return nil - } - field.SetString(key.String()) - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return nil - } - field.SetBool(boolVal) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - intVal, err := key.Int64() - if err != nil || intVal == 0 { - return nil - } - field.SetInt(intVal) - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return nil - } - field.SetUint(uintVal) - - case reflect.Float32, reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return nil - } - field.SetFloat(floatVal) - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return nil - } - field.Set(reflect.ValueOf(timeVal)) - case reflect.Slice: - return setSliceWithProperType(key, field, delim) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -func (s *Section) mapTo(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty - fieldName := s.parseFieldName(tpField.Name, opts[0]) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isAnonymous := 
tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - isStruct := tpField.Type.Kind() == reflect.Struct - if isAnonymous { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if isAnonymous || isStruct { - if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - continue - } - } - - if key, err := s.GetKey(fieldName); err == nil { - if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - } - } - return nil -} - -// MapTo maps section to given struct. -func (s *Section) MapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val) -} - -// MapTo maps file to given struct. -func (f *File) MapTo(v interface{}) error { - return f.Section("").MapTo(v) -} - -// MapTo maps data sources to given struct with name mapper. -func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.MapTo(v) -} - -// MapTo maps data sources to given struct. -func MapTo(v, source interface{}, others ...interface{}) error { - return MapToWithMapper(v, nil, source, others...) -} - -// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. -func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - sliceOf := field.Type().Elem().Kind() - for i := 0; i < field.Len(); i++ { - switch sliceOf { - case reflect.String: - buf.WriteString(slice.Index(i).String()) - case reflect.Int, reflect.Int64: - buf.WriteString(fmt.Sprint(slice.Index(i).Int())) - case reflect.Uint, reflect.Uint64: - buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) - case reflect.Float64: - buf.WriteString(fmt.Sprint(slice.Index(i).Float())) - case reflectTime: - buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) - return nil -} - -// reflectWithProperType does the opposite thing as setWithProperType. -func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { - switch t.Kind() { - case reflect.String: - key.SetValue(field.String()) - case reflect.Bool: - key.SetValue(fmt.Sprint(field.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - key.SetValue(fmt.Sprint(field.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - key.SetValue(fmt.Sprint(field.Uint())) - case reflect.Float32, reflect.Float64: - key.SetValue(fmt.Sprint(field.Float())) - case reflectTime: - key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) - case reflect.Slice: - return reflectSliceWithProperType(key, field, delim) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -// CR: copied from encoding/json/encode.go with modifications of time.Time support. -// TODO: add more test coverage. 
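A round-trip sketch of the struct mapping above: the `ini` tag names the key, `delim` splits slices, and ",omitempty" (via isEmptyValue, continued below) is honored only when reflecting back out. The Server type is illustrative, not part of this patch:

    type Server struct {
        Name    string        `ini:"name"`
        Timeout time.Duration `ini:"timeout"` // setWithProperType tries Duration before Int64
        Hosts   []string      `ini:"hosts" delim:","`
    }

    func roundTrip(cfg *ini.File) error {
        var s Server
        if err := cfg.Section("server").MapTo(&s); err != nil { // non-pointer targets are rejected
            return err
        }
        s.Timeout *= 2
        return cfg.Section("server").ReflectFrom(&s) // writes the fields back as keys
    }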
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflectTime: - return v.Interface().(time.Time).IsZero() - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func (s *Section) reflectFrom(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - opts := strings.SplitN(tag, ",", 2) - if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { - continue - } - - fieldName := s.parseFieldName(tpField.Name, opts[0]) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { - // Note: The only error here is section doesn't exist. - sec, err := s.f.GetSection(fieldName) - if err != nil { - // Note: fieldName can never be empty here, ignore error. - sec, _ = s.f.NewSection(fieldName) - } - if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) - } - continue - } - - // Note: Same reason as secion. - key, err := s.GetKey(fieldName) - if err != nil { - key, _ = s.NewKey(fieldName, "") - } - if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) - } - - } - return nil -} - -// ReflectFrom reflects secion from given struct. -func (s *Section) ReflectFrom(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot reflect from non-pointer struct") - } - - return s.reflectFrom(val) -} - -// ReflectFrom reflects file from given struct. -func (f *File) ReflectFrom(v interface{}) error { - return f.Section("").ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct with name mapper. -func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { - cfg.NameMapper = mapper - return cfg.ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct. -func ReflectFrom(cfg *File, v interface{}) error { - return ReflectFromWithMapper(cfg, v, nil) -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index d9aa3c42d6..63b0f08bef 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } - // x -= 0x80 << 63 // Always zero. 
return 0, errOverflow diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 0000000000..35b882c09a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index d4db5a1c14..f9b6e41b3c 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { return false } - m1, m2 := e1.value, e2.value + m1 := extensionAsLegacyType(e1.value) + m2 := extensionAsLegacyType(e2.value) if m1 == nil && m2 == nil { // Both have only encoded form. 
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 816a3b9d6c..fa88add30a 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -185,9 +185,25 @@ type Extension struct { // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. - desc *ExtensionDesc + desc *ExtensionDesc + + // value is a concrete value for the extension field. Let the type of + // desc.ExtensionType be the "API type" and the type of Extension.value + // be the "storage type". The API type and storage type are the same except: + // * For scalars (except []byte), the API type uses *T, + // while the storage type uses T. + // * For repeated fields, the API type uses []T, while the storage type + // uses *[]T. + // + // The reason for the divergence is so that the storage type more naturally + // matches what is expected of when retrieving the values through the + // protobuf reflection APIs. + // + // The value may only be populated if desc is also populated. value interface{} - enc []byte + + // enc is the raw bytes for the extension field. + enc []byte } // SetRawExtension is for testing only. @@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } - return e.value, nil + return extensionAsLegacyType(e.value), nil } if extension.ExtensionType == nil { @@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // Remember the decoded version and drop the encoded version. // That way it is safe to mutate what we return. - e.value = v + e.value = extensionAsStorageType(v) e.desc = extension e.enc = nil emap[extension.Field] = e - return e.value, nil + return extensionAsLegacyType(e.value), nil } // defaultExtensionValue returns the default value for extension. @@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} + extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} return nil } @@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) { func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } + +// extensionAsLegacyType converts an value in the storage type as the API type. +// See Extension.value. +func extensionAsLegacyType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + // Represent primitive types as a pointer to the value. 
+ rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Slice: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + } + return v +} + +// extensionAsStorageType converts an value in the API type as the storage type. +// See Extension.value. +func extensionAsStorageType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + case reflect.Slice: + // Represent slice types as a pointer to the value. + if rv.Type().Elem().Kind() != reflect.Uint8 { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + } + return v +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 75565cc6dc..fdd328bb7f 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -341,26 +341,6 @@ type Message interface { ProtoMessage() } -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; @@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool { return false } -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion3 = true + + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion2 = true -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion1 = true +) // InternalMessageInfo is a type used internally by generated .pb.go files. // This type is not intended to be used by non-generated code. 
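A standalone illustration, not the proto package API, of the scalar boxing extensionAsLegacyType performs above: scalars are stored unboxed and surfaced behind the pointer type that desc.ExtensionType promises:

    // boxScalar mimics the reflect.New/Set dance above:
    // int32(42) in, *int32 out.
    func boxScalar(v interface{}) interface{} {
        rv := reflect.ValueOf(v)
        pv := reflect.New(rv.Type())
        pv.Elem().Set(rv)
        return pv.Interface()
    }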
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index 3b6ca41d5e..f48a756761 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -36,13 +36,7 @@ package proto */ import ( - "bytes" - "encoding/json" "errors" - "fmt" - "reflect" - "sort" - "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. -func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. - ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { +func unmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. - mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. 
- continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index b6cad90834..94fa9194a8 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -79,10 +79,13 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { +func toAddrPointer(i *interface{}, isptr, deref bool) pointer { v := reflect.ValueOf(*i) u := reflect.New(v.Type()) u.Elem().Set(v) + if deref { + u = u.Elem() + } return pointer{v: u} } diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index d55a335d94..dbfffe071b 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -85,16 +85,21 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { +func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { // Super-tricky - read or get the address of data word of interface value. if isptr { // The interface is of pointer type, thus it is a direct interface. // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } else { + // The interface is not of pointer type. The data word is the pointer + // to the data. + p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + if deref { + p.p = *(*unsafe.Pointer)(p.p) + } + return p } // valToPointer converts v to a pointer. v must be of pointer type. 
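The new deref parameter exists because repeated extension values are now stored as *[]T (see extensionAsStorageType earlier), while the generated slice marshalers expect the []T itself; a conceptual in-package sketch:

    // Storage holds *[]int32; the marshaler for a repeated int32 extension
    // wants the []int32 header, so toAddrPointer is asked to unwrap once.
    s := []int32{1, 2, 3}
    var v interface{} = &s // storage type: *[]int32
    p := toAddrPointer(&v, true /* isptr */, true /* deref */)
    _ = p // now addresses the []int32 itself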
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 50b99b83a8..79668ff5c5 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { - if collectStats { - stats.Chit++ - } return sprop } @@ -346,17 +343,20 @@ func GetProperties(t reflect.Type) *StructProperties { return sprop } +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } return prop } - if collectStats { - stats.Cmiss++ - } prop := new(StructProperties) // in case of recursive protos, fill this in now. @@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. sort.Sort(prop) - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - + if len(oots) > 0 { // Interpret oneof metadata. 
prop.OneofTypes = make(map[string]*OneofProperties) for _, oot := range oots { diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go index b16794496f..5cb11fa955 100644 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -87,6 +87,7 @@ type marshalElemInfo struct { sizer sizer marshaler marshaler isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) + deref bool // dereference the pointer before operating on it; implies isptr } var ( @@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() { // get oneof implementers var oneofImplementers []interface{} - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() } n := t.NumField() @@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { panic("tag is not an integer") } wt := wiretype(tags[0]) + if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { + t = t.Elem() + } sizer, marshaler := typeMarshaler(t, tags, false, false) + var deref bool + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + t = reflect.PtrTo(t) + deref = true + } e = &marshalElemInfo{ wiretag: uint64(tag)<<3 | wt, tagsize: SizeVarint(uint64(tag) << 3), sizer: sizer, marshaler: marshaler, isptr: t.Kind() == reflect.Ptr, + deref: deref, } // update cache @@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) @@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI } } -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - // wiretype returns the wire encoding of the type. 
func wiretype(encoding string) uint64 { switch encoding { @@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range m.MapKeys() { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) n += siz + SizeVarint(uint64(siz)) + tagsize } @@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value b = appendVarint(b, tag) siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) @@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { // the last time this function was called. ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, ei.tagsize) } mu.Unlock() @@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, 1) // message, tag = 3 (size=1) } mu.Unlock() @@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) if !nerr.Merge(err) { return b, err @@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) if !nerr.Merge(err) { @@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, ei.tagsize) } return n @@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, 
ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go index ebf1caa56a..acee2fc529 100644 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { u.computeUnmarshalInfo() } if u.isMessageSet { - return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } var reqMask uint64 // bitmask of required fields we've seen. var errLater error @@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - if fn.IsValid() { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break } + } - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) } } + } // Get extension ranges, if any. 
- fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") if fn.IsValid() { if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { panic("a message with extensions, but no extensions field in " + t.Name()) @@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte { // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) <= 0 { + if len(b) == 0 { goto bad } x = uint64(b[0]) diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index e3c56d3ffa..78ee523349 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto -package any // import "github.com/golang/protobuf/ptypes/any" +package any -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // `Any` contains an arbitrary serialized protocol buffer message along with a // URL that describes the type of the serialized message. @@ -99,17 +101,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // } // type Any struct { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: // // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -118,6 +121,10 @@ type Any struct { // on changes to types. (Use versioned type names to manage // breaking changes.) // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. 
+ // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // @@ -133,17 +140,19 @@ func (m *Any) Reset() { *m = Any{} } func (m *Any) String() string { return proto.CompactTextString(m) } func (*Any) ProtoMessage() {} func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_any_744b9ca530f228db, []int{0} + return fileDescriptor_b53526c13ae22eb4, []int{0} } + func (*Any) XXX_WellKnownType() string { return "Any" } + func (m *Any) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Any.Unmarshal(m, b) } func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Any.Marshal(b, m, deterministic) } -func (dst *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(dst, src) +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) } func (m *Any) XXX_Size() int { return xxx_messageInfo_Any.Size(m) @@ -172,9 +181,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } -var fileDescriptor_any_744b9ca530f228db = []byte{ +var fileDescriptor_b53526c13ae22eb4 = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto index c748667623..4932942558 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -120,17 +120,18 @@ option objc_class_prefix = "GPB"; // } // message Any { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: // // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -139,6 +140,10 @@ message Any { // on changes to types. (Use versioned type names to manage // breaking changes.) // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. 
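The type-URL contract spelled out in this comment is what the ptypes helpers elsewhere in this vendor tree follow; a small round-trip sketch, assuming the vendored github.com/golang/protobuf packages are imported as-is:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a well-known type into an Any.
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 3})
	if err != nil {
		panic(err)
	}
	// The last path segment of TypeUrl is the fully qualified message
	// name, exactly as the comment above requires.
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	var d durpb.Duration
	if err := ptypes.UnmarshalAny(a, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Seconds) // 3
}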
+ // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 65cb0f8eb5..26d1ca2fb5 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } if p.Nanos != 0 { - d += time.Duration(p.Nanos) + d += time.Duration(p.Nanos) * time.Nanosecond if (d < 0) != (p.Nanos < 0) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index a7beb2c414..0d681ee21a 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -package duration // import "github.com/golang/protobuf/ptypes/duration" +package duration -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // A Duration represents a signed, fixed-length span of time represented // as a count of seconds and fractions of seconds at nanosecond @@ -99,17 +101,19 @@ func (m *Duration) Reset() { *m = Duration{} } func (m *Duration) String() string { return proto.CompactTextString(m) } func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_duration_e7d612259e3f0613, []int{0} + return fileDescriptor_23597b2ebd7ac6c5, []int{0} } + func (*Duration) XXX_WellKnownType() string { return "Duration" } + func (m *Duration) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Duration.Unmarshal(m, b) } func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Duration.Marshal(b, m, deterministic) } -func (dst *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(dst, src) +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) } func (m *Duration) XXX_Size() int { return xxx_messageInfo_Duration.Size(m) @@ -138,11 +142,9 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { - proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) -} +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } -var fileDescriptor_duration_e7d612259e3f0613 = []byte{ +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 
0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go index ee6382e143..33daa73dd2 100644 --- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/struct.proto -package structpb // import "github.com/golang/protobuf/ptypes/struct" +package structpb -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // `NullValue` is a singleton enumeration to represent the null value for the // `Value` type union. @@ -32,6 +34,7 @@ const ( var NullValue_name = map[int32]string{ 0: "NULL_VALUE", } + var NullValue_value = map[string]int32{ "NULL_VALUE": 0, } @@ -39,9 +42,11 @@ var NullValue_value = map[string]int32{ func (x NullValue) String() string { return proto.EnumName(NullValue_name, int32(x)) } + func (NullValue) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} + return fileDescriptor_df322afd6c9fb402, []int{0} } + func (NullValue) XXX_WellKnownType() string { return "NullValue" } // `Struct` represents a structured data value, consisting of fields @@ -64,17 +69,19 @@ func (m *Struct) Reset() { *m = Struct{} } func (m *Struct) String() string { return proto.CompactTextString(m) } func (*Struct) ProtoMessage() {} func (*Struct) Descriptor() ([]byte, []int) { - return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} + return fileDescriptor_df322afd6c9fb402, []int{0} } + func (*Struct) XXX_WellKnownType() string { return "Struct" } + func (m *Struct) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Struct.Unmarshal(m, b) } func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Struct.Marshal(b, m, deterministic) } -func (dst *Struct) XXX_Merge(src proto.Message) { - xxx_messageInfo_Struct.Merge(dst, src) +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) } func (m *Struct) XXX_Size() int { return xxx_messageInfo_Struct.Size(m) @@ -118,17 +125,19 @@ func (m *Value) Reset() { *m = Value{} } func (m *Value) String() string { return proto.CompactTextString(m) } func (*Value) ProtoMessage() {} func (*Value) Descriptor() ([]byte, []int) { - return fileDescriptor_struct_3a5a94e0c7801b27, []int{1} + return fileDescriptor_df322afd6c9fb402, []int{1} } + func (*Value) XXX_WellKnownType() string { return "Value" } + func (m *Value) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Value.Unmarshal(m, b) } func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Value.Marshal(b, m, deterministic) } -func (dst *Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Value.Merge(dst, src) +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) } 
func (m *Value) XXX_Size() int { return xxx_messageInfo_Value.Size(m) @@ -228,9 +237,9 @@ func (m *Value) GetListValue() *ListValue { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), (*Value_StringValue)(nil), @@ -240,129 +249,6 @@ func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, } } -func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Value) - // kind - switch x := m.Kind.(type) { - case *Value_NullValue: - b.EncodeVarint(1<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.NullValue)) - case *Value_NumberValue: - b.EncodeVarint(2<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.NumberValue)) - case *Value_StringValue: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.StringValue) - case *Value_BoolValue: - t := uint64(0) - if x.BoolValue { - t = 1 - } - b.EncodeVarint(4<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *Value_StructValue: - b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.StructValue); err != nil { - return err - } - case *Value_ListValue: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ListValue); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Value.Kind has unexpected type %T", x) - } - return nil -} - -func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Value) - switch tag { - case 1: // kind.null_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Kind = &Value_NullValue{NullValue(x)} - return true, err - case 2: // kind.number_value - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Kind = &Value_NumberValue{math.Float64frombits(x)} - return true, err - case 3: // kind.string_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Kind = &Value_StringValue{x} - return true, err - case 4: // kind.bool_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Kind = &Value_BoolValue{x != 0} - return true, err - case 5: // kind.struct_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Struct) - err := b.DecodeMessage(msg) - m.Kind = &Value_StructValue{msg} - return true, err - case 6: // kind.list_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ListValue) - err := b.DecodeMessage(msg) - m.Kind = &Value_ListValue{msg} - return true, err - default: - return false, nil - } -} - -func _Value_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Value) - // kind - switch x := m.Kind.(type) { - case *Value_NullValue: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.NullValue)) - case *Value_NumberValue: - n += 1 // tag and wire - n += 8 - case *Value_StringValue: - n += 1 // tag and wire - n += 
proto.SizeVarint(uint64(len(x.StringValue))) - n += len(x.StringValue) - case *Value_BoolValue: - n += 1 // tag and wire - n += 1 - case *Value_StructValue: - s := proto.Size(x.StructValue) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *Value_ListValue: - s := proto.Size(x.ListValue) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - // `ListValue` is a wrapper around a repeated field of values. // // The JSON representation for `ListValue` is JSON array. @@ -378,17 +264,19 @@ func (m *ListValue) Reset() { *m = ListValue{} } func (m *ListValue) String() string { return proto.CompactTextString(m) } func (*ListValue) ProtoMessage() {} func (*ListValue) Descriptor() ([]byte, []int) { - return fileDescriptor_struct_3a5a94e0c7801b27, []int{2} + return fileDescriptor_df322afd6c9fb402, []int{2} } + func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + func (m *ListValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListValue.Unmarshal(m, b) } func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) } -func (dst *ListValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListValue.Merge(dst, src) +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) } func (m *ListValue) XXX_Size() int { return xxx_messageInfo_ListValue.Size(m) @@ -407,18 +295,16 @@ func (m *ListValue) GetValues() []*Value { } func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") proto.RegisterType((*Value)(nil), "google.protobuf.Value") proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") - proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) } -func init() { - proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27) -} +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } -var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{ +var fileDescriptor_df322afd6c9fb402 = []byte{ // 417 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 47f10dbc2c..8da0df01ac 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -111,11 +111,9 @@ func TimestampNow() *tspb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. 
func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) ts := &tspb.Timestamp{ - Seconds: seconds, - Nanos: nanos, + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), } if err := validateTimestamp(ts); err != nil { return nil, err diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 8e76ae9763..31cd846de9 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto -package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" +package timestamp -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // A Timestamp represents a point in time independent of any time zone // or calendar, represented as seconds and fractions of seconds at @@ -81,7 +83,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -92,8 +96,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) -// to obtain a formatter capable of generating timestamps in this format. +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. 
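The rewritten TimestampProto reads both proto fields directly off the time.Time instead of reconstructing the sub-second part by subtracting a freshly built time value; a quick sketch of the equivalence:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2019, 4, 18, 11, 31, 45, 123456789, time.UTC)

	// New derivation: Unix() gives whole seconds since the epoch and
	// Nanosecond() gives the sub-second offset, always in [0, 1e9).
	fmt.Println(t.Unix(), t.Nanosecond())

	// The removed derivation produced the same nanos via a time.Duration,
	// at the cost of constructing a second time.Time.
	fmt.Println(int32(t.Sub(time.Unix(t.Unix(), 0)))) // 123456789
}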
// // type Timestamp struct { @@ -115,17 +119,19 @@ func (m *Timestamp) Reset() { *m = Timestamp{} } func (m *Timestamp) String() string { return proto.CompactTextString(m) } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} + return fileDescriptor_292007bbfe81227e, []int{0} } + func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + func (m *Timestamp) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Timestamp.Unmarshal(m, b) } func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) } -func (dst *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(dst, src) +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) } func (m *Timestamp) XXX_Size() int { return xxx_messageInfo_Timestamp.Size(m) @@ -154,11 +160,9 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { - proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) -} +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } -var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ +var fileDescriptor_292007bbfe81227e = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index 06750ab1f1..eafb3fa03a 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -103,7 +103,9 @@ option objc_class_prefix = "GPB"; // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -114,8 +116,8 @@ option objc_class_prefix = "GPB"; // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) -// to obtain a formatter capable of generating timestamps in this format. +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. 
// // message Timestamp { diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go index 0f0fa837f6..add19a1adb 100644 --- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/wrappers.proto -package wrappers // import "github.com/golang/protobuf/ptypes/wrappers" +package wrappers -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Wrapper message for `double`. // @@ -33,17 +35,19 @@ func (m *DoubleValue) Reset() { *m = DoubleValue{} } func (m *DoubleValue) String() string { return proto.CompactTextString(m) } func (*DoubleValue) ProtoMessage() {} func (*DoubleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} + return fileDescriptor_5377b62bda767935, []int{0} } + func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + func (m *DoubleValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DoubleValue.Unmarshal(m, b) } func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) } -func (dst *DoubleValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleValue.Merge(dst, src) +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) } func (m *DoubleValue) XXX_Size() int { return xxx_messageInfo_DoubleValue.Size(m) @@ -76,17 +80,19 @@ func (m *FloatValue) Reset() { *m = FloatValue{} } func (m *FloatValue) String() string { return proto.CompactTextString(m) } func (*FloatValue) ProtoMessage() {} func (*FloatValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} + return fileDescriptor_5377b62bda767935, []int{1} } + func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + func (m *FloatValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FloatValue.Unmarshal(m, b) } func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) } -func (dst *FloatValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_FloatValue.Merge(dst, src) +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) } func (m *FloatValue) XXX_Size() int { return xxx_messageInfo_FloatValue.Size(m) @@ -119,17 +125,19 @@ func (m *Int64Value) Reset() { *m = Int64Value{} } func (m *Int64Value) String() string { return proto.CompactTextString(m) } func (*Int64Value) ProtoMessage() {} func (*Int64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} + return fileDescriptor_5377b62bda767935, []int{2} } + func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + func (m 
*Int64Value) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Int64Value.Unmarshal(m, b) } func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) } -func (dst *Int64Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Int64Value.Merge(dst, src) +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) } func (m *Int64Value) XXX_Size() int { return xxx_messageInfo_Int64Value.Size(m) @@ -162,17 +170,19 @@ func (m *UInt64Value) Reset() { *m = UInt64Value{} } func (m *UInt64Value) String() string { return proto.CompactTextString(m) } func (*UInt64Value) ProtoMessage() {} func (*UInt64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{3} + return fileDescriptor_5377b62bda767935, []int{3} } + func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + func (m *UInt64Value) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UInt64Value.Unmarshal(m, b) } func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) } -func (dst *UInt64Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_UInt64Value.Merge(dst, src) +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) } func (m *UInt64Value) XXX_Size() int { return xxx_messageInfo_UInt64Value.Size(m) @@ -205,17 +215,19 @@ func (m *Int32Value) Reset() { *m = Int32Value{} } func (m *Int32Value) String() string { return proto.CompactTextString(m) } func (*Int32Value) ProtoMessage() {} func (*Int32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} + return fileDescriptor_5377b62bda767935, []int{4} } + func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + func (m *Int32Value) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Int32Value.Unmarshal(m, b) } func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) } -func (dst *Int32Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Int32Value.Merge(dst, src) +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) } func (m *Int32Value) XXX_Size() int { return xxx_messageInfo_Int32Value.Size(m) @@ -248,17 +260,19 @@ func (m *UInt32Value) Reset() { *m = UInt32Value{} } func (m *UInt32Value) String() string { return proto.CompactTextString(m) } func (*UInt32Value) ProtoMessage() {} func (*UInt32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} + return fileDescriptor_5377b62bda767935, []int{5} } + func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + func (m *UInt32Value) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UInt32Value.Unmarshal(m, b) } func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) } -func (dst *UInt32Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_UInt32Value.Merge(dst, src) +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) } func (m *UInt32Value) XXX_Size() int { return xxx_messageInfo_UInt32Value.Size(m) @@ -291,17 +305,19 @@ func (m *BoolValue) Reset() { *m = BoolValue{} } func (m *BoolValue) String() string { return proto.CompactTextString(m) } 
func (*BoolValue) ProtoMessage() {} func (*BoolValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} + return fileDescriptor_5377b62bda767935, []int{6} } + func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + func (m *BoolValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BoolValue.Unmarshal(m, b) } func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) } -func (dst *BoolValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_BoolValue.Merge(dst, src) +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) } func (m *BoolValue) XXX_Size() int { return xxx_messageInfo_BoolValue.Size(m) @@ -334,17 +350,19 @@ func (m *StringValue) Reset() { *m = StringValue{} } func (m *StringValue) String() string { return proto.CompactTextString(m) } func (*StringValue) ProtoMessage() {} func (*StringValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} + return fileDescriptor_5377b62bda767935, []int{7} } + func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + func (m *StringValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StringValue.Unmarshal(m, b) } func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) } -func (dst *StringValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringValue.Merge(dst, src) +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) } func (m *StringValue) XXX_Size() int { return xxx_messageInfo_StringValue.Size(m) @@ -377,17 +395,19 @@ func (m *BytesValue) Reset() { *m = BytesValue{} } func (m *BytesValue) String() string { return proto.CompactTextString(m) } func (*BytesValue) ProtoMessage() {} func (*BytesValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} + return fileDescriptor_5377b62bda767935, []int{8} } + func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + func (m *BytesValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BytesValue.Unmarshal(m, b) } func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) } -func (dst *BytesValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesValue.Merge(dst, src) +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) } func (m *BytesValue) XXX_Size() int { return xxx_messageInfo_BytesValue.Size(m) @@ -417,11 +437,9 @@ func init() { proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") } -func init() { - proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253) -} +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } -var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ +var fileDescriptor_5377b62bda767935 = []byte{ // 259 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 0000000000..32017f8fa1 --- /dev/null +++ 
b/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 0000000000..7e215f2202 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,553 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared +// using the AllowUnexported option. +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to +// the reflection package's inability to retrieve such entries. 
Equal will panic +// anytime it comes across a NaN key, but this behavior may change. +// +// See https://golang.org/issue/11104 for more details. + +var nothing = reflect.Value{} + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • If two values are not of the same type, then they are never equal +// and the overall result is false. +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. +// Otherwise, no such method exists and evaluation proceeds to the next rule. +// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// Pointers are equal if the underlying values they point to are also equal. +// Interfaces are equal if their underlying concrete values are also equal. +// +// Structs are equal if all of their fields are equal. If a struct contains +// unexported fields, Equal panics unless the AllowUnexported option is used or +// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field. +// +// Arrays, slices, and maps are equal if they are both nil or both non-nil +// with the same length and the elements at each index or key are equal. +// Note that a non-nil empty slice and a nil slice are not equal. +// To equate empty slices and maps, consider using cmpopts.EquateEmpty. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values. +// It returns an empty string if and only if Equal returns true for the same +// input values and options. The output string will use the "-" symbol to +// indicate elements removed from x, and the "+" symbol to indicate elements +// added to y. +// +// Do not depend on this output being stable. +func Diff(x, y interface{}, opts ...Option) string { + r := new(defaultReporter) + opts = Options{Options(opts), r} + eq := Equal(x, y, opts...) + d := r.String() + if (d == "") != eq { + panic("inconsistent difference and equality results") + } + return d +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. 
+ result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + reporter reporter // Optional reporter used for difference formatting + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters map[reflect.Type]bool // Set of structs with unexported field visibility + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + s := new(state) + for _, opt := range opts { + s.processOption(opt) + } + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case visibleStructs: + if s.exporters == nil { + s.exporters = make(map[reflect.Type]bool) + } + for t := range opt { + s.exporters[t] = true + } + case reporter: + if s.reporter != nil { + panic("difference reporter already registered") + } + s.reporter = opt + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result { + // We do not save and restore the curPath because all of the compareX + // methods should properly push and pop from the path. + // It is an implementation bug if the contents of curPath differs from + // when calling this function to when returning from it. + + oldResult, oldReporter := s.result, s.reporter + s.result = diff.Result{} // Reset result + s.reporter = nil // Remove reporter to avoid spurious printouts + s.compareAny(vx, vy) + res := s.result + s.result, s.reporter = oldResult, oldReporter + return res +} + +func (s *state) compareAny(vx, vy reflect.Value) { + // TODO: Support cyclic data structures. + + // Rule 0: Differing types are never equal. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), vx, vy) + return + } + if vx.Type() != vy.Type() { + s.report(false, vx, vy) // Possible for path to be empty + return + } + t := vx.Type() + if len(s.curPath) == 0 { + s.curPath.push(&pathStep{typ: t}) + defer s.curPath.pop() + } + vx, vy = s.tryExporting(vx, vy) + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(vx, vy, t) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(vx, vy, t) { + return + } + + // Rule 3: Recursively descend into each value's underlying kind. 
+ switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), vx, vy) + return + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), vx, vy) + return + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), vx, vy) + return + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), vx, vy) + return + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), vx, vy) + return + case reflect.String: + s.report(vx.String() == vy.String(), vx, vy) + return + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), vx, vy) + return + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), vx, vy) + return + case reflect.Ptr: + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), vx, vy) + return + } + s.curPath.push(&indirect{pathStep{t.Elem()}}) + defer s.curPath.pop() + s.compareAny(vx.Elem(), vy.Elem()) + return + case reflect.Interface: + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), vx, vy) + return + } + if vx.Elem().Type() != vy.Elem().Type() { + s.report(false, vx.Elem(), vy.Elem()) + return + } + s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}}) + defer s.curPath.pop() + s.compareAny(vx.Elem(), vy.Elem()) + return + case reflect.Slice: + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), vx, vy) + return + } + fallthrough + case reflect.Array: + s.compareArray(vx, vy, t) + return + case reflect.Map: + s.compareMap(vx, vy, t) + return + case reflect.Struct: + s.compareStruct(vx, vy, t) + return + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) { + if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported { + if sf.force { + // Use unsafe pointer arithmetic to get read-write access to an + // unexported field in the struct. + vx = unsafeRetrieveField(sf.pvx, sf.field) + vy = unsafeRetrieveField(sf.pvy, sf.field) + } else { + // We are not allowed to export the value, so invalidate them + // so that tryOptions can panic later if not explicitly ignored. + vx = nothing + vy = nothing + } + } + return vx, vy +} + +func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool { + // If there were no FilterValues, we will not detect invalid inputs, + // so manually check for them and append invalid if necessary. + // We still evaluate the options since an ignore can override invalid. + opts := s.opts + if !vx.IsValid() || !vy.IsValid() { + opts = Options{opts, invalid{}} + } + + // Evaluate all filters and apply the remaining options. + if opt := opts.filter(s, vx, vy, t); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, vx, vy) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. 
+ // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + want := f.Call([]reflect.Value{v})[0] + if got := <-c; !s.statelessCompare(got, want).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if !s.statelessCompare(want, want).Equal() { + return want + } + fn := getFuncName(f.Pointer()) + panic(fmt.Sprintf("non-deterministic function detected: %s", fn)) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + x = sanitizeValue(x, f.Type().In(0)) + y = sanitizeValue(y, f.Type().In(1)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + want := f.Call([]reflect.Value{x, y})[0].Bool() + if got := <-c; !got.IsValid() || got.Bool() != want { + fn := getFuncName(f.Pointer()) + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn)) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +// sanitizeValue converts nil interfaces of type T to those of type R, +// assuming that T is assignable to R. +// Otherwise, it returns the input value as is. +func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { + // TODO(dsnet): Remove this hacky workaround. + // See https://golang.org/issue/22143 + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } + return v +} + +func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) { + step := &sliceIndex{pathStep{t.Elem()}, 0, 0} + s.curPath.push(step) + + // Compute an edit-script for slices vx and vy. + es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + step.xkey, step.ykey = ix, iy + return s.statelessCompare(vx.Index(ix), vy.Index(iy)) + }) + + // Report the entire slice as is if the arrays are of primitive kind, + // and the arrays are different enough. + isPrimitive := false + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + isPrimitive = true + } + if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 { + s.curPath.pop() // Pop first since we are reporting the whole slice + s.report(false, vx, vy) + return + } + + // Replay the edit-script. 
+ var ix, iy int
+ for _, e := range es {
+ switch e {
+ case diff.UniqueX:
+ step.xkey, step.ykey = ix, -1
+ s.report(false, vx.Index(ix), nothing)
+ ix++
+ case diff.UniqueY:
+ step.xkey, step.ykey = -1, iy
+ s.report(false, nothing, vy.Index(iy))
+ iy++
+ default:
+ step.xkey, step.ykey = ix, iy
+ if e == diff.Identity {
+ s.report(true, vx.Index(ix), vy.Index(iy))
+ } else {
+ s.compareAny(vx.Index(ix), vy.Index(iy))
+ }
+ ix++
+ iy++
+ }
+ }
+ s.curPath.pop()
+ return
+}
+
+func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+ return
+ }
+
+ // We combine and sort the two map keys so that we can perform the
+ // comparisons in a deterministic order.
+ step := &mapIndex{pathStep: pathStep{t.Elem()}}
+ s.curPath.push(step)
+ defer s.curPath.pop()
+ for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+ step.key = k
+ vvx := vx.MapIndex(k)
+ vvy := vy.MapIndex(k)
+ switch {
+ case vvx.IsValid() && vvy.IsValid():
+ s.compareAny(vvx, vvy)
+ case vvx.IsValid() && !vvy.IsValid():
+ s.report(false, vvx, nothing)
+ case !vvx.IsValid() && vvy.IsValid():
+ s.report(false, nothing, vvy)
+ default:
+ // It is possible for both vvx and vvy to be invalid if the
+ // key contained a NaN value. There is no way in reflection
+ // to retrieve such values.
+ // See https://golang.org/issue/11104
+ panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath))
+ }
+ }
+}
+
+func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) {
+ var vax, vay reflect.Value // Addressable versions of vx and vy
+
+ step := &structField{}
+ s.curPath.push(step)
+ defer s.curPath.pop()
+ for i := 0; i < t.NumField(); i++ {
+ vvx := vx.Field(i)
+ vvy := vy.Field(i)
+ step.typ = t.Field(i).Type
+ step.name = t.Field(i).Name
+ step.idx = i
+ step.unexported = !isExported(step.name)
+ if step.unexported {
+ // Defer checking of unexported fields until later to give an
+ // Ignore a chance to ignore the field.
+ if !vax.IsValid() || !vay.IsValid() {
+ // For unsafeRetrieveField to work, the parent struct must
+ // be addressable. Create a new copy of the values if
+ // necessary to make them addressable.
+ vax = makeAddressable(vx)
+ vay = makeAddressable(vy)
+ }
+ step.force = s.exporters[t]
+ step.pvx = vax
+ step.pvy = vay
+ step.field = t.Field(i)
+ }
+ s.compareAny(vvx, vvy)
+ }
+}
+
+// report records the result of a single comparison.
+// It also calls Report if any reporter is registered.
+func (s *state) report(eq bool, vx, vy reflect.Value) {
+ if eq {
+ s.result.NSame++
+ } else {
+ s.result.NDiff++
+ }
+ if s.reporter != nil {
+ s.reporter.Report(vx, vy, eq, s.curPath)
+ }
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user-provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
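+//
+// As an illustrative trace (an editorial addition, not upstream text):
+// starting from the zero value, Next reports true on calls 1, 2, 4, 7, 11,
+// 16, ..., with the gap between checks growing by one each time, so only
+// about O(√n) of the first n calls trigger a check.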
+func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. +func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 0000000000..42afa4960e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 0000000000..fd9f7f1773 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. 
+ +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. + return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 0000000000..260befea2f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,363 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. 
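+//
+// For example (an illustrative note, not part of the upstream comment),
+// the edit-script [Identity, UniqueX, UniqueY, Modified] is rendered
+// as ".XYM".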
+func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. +func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NSame is the number of sub-elements that are equal. +// NDiff is the number of sub-elements that are not equal. +type Result struct{ NSame, NDiff int } + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NSame to NDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NSame+1 to offset NSame so that binary comparisons are similar. + return r.NSame+1 >= r.NDiff +} + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. 
+ //
+ // For example, let X and Y be lists of symbols:
+ // X = [A B C A B B A]
+ // Y = [C B A B A C]
+ //
+ // The edit-graph can be drawn as the following:
+ // A B C A B B A
+ // ┌─────────────┐
+ // C │_|_|\|_|_|_|_│ 0
+ // B │_|\|_|_|\|\|_│ 1
+ // A │\|_|_|\|_|_|\│ 2
+ // B │_|\|_|_|\|\|_│ 3
+ // A │\|_|_|\|_|_|\│ 4
+ // C │ | |\| | | | │ 5
+ // └─────────────┘ 6
+ // 0 1 2 3 4 5 6 7
+ //
+ // List X is written along the horizontal axis, while list Y is written
+ // along the vertical axis. At any point on this grid, if the symbol in
+ // list X matches the corresponding symbol in list Y, then a '\' is drawn.
+ // The goal of any minimal edit-script algorithm is to find a path from the
+ // top-left corner to the bottom-right corner, while traveling through the
+ // fewest horizontal or vertical edges.
+ // A horizontal edge is equivalent to inserting a symbol from list X.
+ // A vertical edge is equivalent to inserting a symbol from list Y.
+ // A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+ // Invariants:
+ // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+ // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+ //
+ // In general:
+ // • fwdFrontier.X < revFrontier.X
+ // • fwdFrontier.Y < revFrontier.Y
+ // unless it is time for the algorithm to terminate.
+ fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+ revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+ fwdFrontier := fwdPath.point // Forward search frontier
+ revFrontier := revPath.point // Reverse search frontier
+
+ // Search budget bounds the cost of searching for better paths.
+ // The longest sequence of non-matching symbols that can be tolerated is
+ // approximately the square-root of the search budget.
+ searchBudget := 4 * (nx + ny) // O(n)
+
+ // The algorithm below is a greedy, meet-in-the-middle algorithm for
+ // computing sub-optimal edit-scripts between two lists.
+ //
+ // The algorithm is approximately as follows:
+ // • Searching for differences switches back-and-forth between
+ // a search that starts at the beginning (the top-left corner), and
+ // a search that starts at the end (the bottom-right corner). The goal of
+ // the search is to connect with the search from the opposite corner.
+ // • As we search, we build a path in a greedy manner, where the first
+ // match seen is added to the path (this is sub-optimal, but provides a
+ // decent result in practice). When matches are found, we try the next pair
+ // of symbols in the lists and follow all matches as far as possible.
+ // • When searching for matches, we search along a diagonal going through
+ // the "frontier" point. If no matches are found, we advance the
+ // frontier towards the opposite corner.
+ // • This algorithm terminates when either the X coordinates or the
+ // Y coordinates of the forward and reverse frontier points ever intersect.
+ //
+ // This algorithm is correct even if searching only in the forward direction
+ // or in the reverse direction. We do both because two lists commonly differ
+ // when elements are added to the front or end of one of the lists.
+ //
+ // Running the tests with the "debug" build tag prints a visualization of
+ // the algorithm running in real-time. This is educational for understanding
+ // how the algorithm works. See debug_enable.go.
+ f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+ for {
+ // Forward search from the beginning.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. + if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + } + + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. 
+ for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 0000000000..4c35ff11ee --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,49 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package function identifies function types. +package function + +import "reflect" + +type funcType int + +const ( + _ funcType = iota + + ttbFunc // func(T, T) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go new file mode 100644 index 0000000000..657e508779 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go @@ -0,0 +1,277 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package value provides functionality for reflect.Value types. +package value + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" +) + +var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + +// Format formats the value v as a string. 
+//
+// This is similar to fmt.Sprintf("%+v", v) except this:
+// * Prints the type unless it can be elided
+// * Avoids printing struct fields that are zero
+// * Prints a nil-slice as being nil, not empty
+// * Prints map entries in deterministic order
+func Format(v reflect.Value, conf FormatConfig) string {
+ conf.printType = true
+ conf.followPointers = true
+ conf.realPointers = true
+ return formatAny(v, conf, nil)
+}
+
+type FormatConfig struct {
+ UseStringer bool // Should the String method be used if available?
+ printType bool // Should we print the type before the value?
+ PrintPrimitiveType bool // Should we print the type of primitives?
+ followPointers bool // Should we recursively follow pointers?
+ realPointers bool // Should we print the real address of pointers?
+}
+
+func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string {
+ // TODO: Should this be a multi-line printout in certain situations?
+
+ if !v.IsValid() {
+ return "<non-existent>"
+ }
+ if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() {
+ if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() {
+ return "<nil>"
+ }
+
+ const stringerPrefix = "s" // Indicates that the String method was used
+ s := v.Interface().(fmt.Stringer).String()
+ return stringerPrefix + formatString(s)
+ }
+
+ switch v.Kind() {
+ case reflect.Bool:
+ return formatPrimitive(v.Type(), v.Bool(), conf)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return formatPrimitive(v.Type(), v.Int(), conf)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr {
+ // Unnamed uints are usually bytes or words, so use hexadecimal.
+ return formatPrimitive(v.Type(), formatHex(v.Uint()), conf)
+ }
+ return formatPrimitive(v.Type(), v.Uint(), conf)
+ case reflect.Float32, reflect.Float64:
+ return formatPrimitive(v.Type(), v.Float(), conf)
+ case reflect.Complex64, reflect.Complex128:
+ return formatPrimitive(v.Type(), v.Complex(), conf)
+ case reflect.String:
+ return formatPrimitive(v.Type(), formatString(v.String()), conf)
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ return formatPointer(v, conf)
+ case reflect.Ptr:
+ if v.IsNil() {
+ if conf.printType {
+ return fmt.Sprintf("(%v)(nil)", v.Type())
+ }
+ return "<nil>"
+ }
+ if visited[v.Pointer()] || !conf.followPointers {
+ return formatPointer(v, conf)
+ }
+ visited = insertPointer(visited, v.Pointer())
+ return "&" + formatAny(v.Elem(), conf, visited)
+ case reflect.Interface:
+ if v.IsNil() {
+ if conf.printType {
+ return fmt.Sprintf("%v(nil)", v.Type())
+ }
+ return "<nil>"
+ }
+ return formatAny(v.Elem(), conf, visited)
+ case reflect.Slice:
+ if v.IsNil() {
+ if conf.printType {
+ return fmt.Sprintf("%v(nil)", v.Type())
+ }
+ return "<nil>"
+ }
+ if visited[v.Pointer()] {
+ return formatPointer(v, conf)
+ }
+ visited = insertPointer(visited, v.Pointer())
+ fallthrough
+ case reflect.Array:
+ var ss []string
+ subConf := conf
+ subConf.printType = v.Type().Elem().Kind() == reflect.Interface
+ for i := 0; i < v.Len(); i++ {
+ s := formatAny(v.Index(i), subConf, visited)
+ ss = append(ss, s)
+ }
+ s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+ if conf.printType {
+ return v.Type().String() + s
+ }
+ return s
+ case reflect.Map:
+ if v.IsNil() {
+ if conf.printType {
+ return fmt.Sprintf("%v(nil)", v.Type())
+ }
+ return "<nil>"
+ }
+ if visited[v.Pointer()] {
+ return formatPointer(v, conf)
+ }
+ visited = insertPointer(visited, v.Pointer())
+
+ var ss []string
+ keyConf, valConf := conf, conf
+ keyConf.printType = v.Type().Key().Kind() == reflect.Interface
+ keyConf.followPointers = false
+ valConf.printType = v.Type().Elem().Kind() == reflect.Interface
+ for _, k := range SortKeys(v.MapKeys()) {
+ sk := formatAny(k, keyConf, visited)
+ sv := formatAny(v.MapIndex(k), valConf, visited)
+ ss = append(ss, fmt.Sprintf("%s: %s", sk, sv))
+ }
+ s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+ if conf.printType {
+ return v.Type().String() + s
+ }
+ return s
+ case reflect.Struct:
+ var ss []string
+ subConf := conf
+ subConf.printType = true
+ for i := 0; i < v.NumField(); i++ {
+ vv := v.Field(i)
+ if isZero(vv) {
+ continue // Elide zero value fields
+ }
+ name := v.Type().Field(i).Name
+ subConf.UseStringer = conf.UseStringer
+ s := formatAny(vv, subConf, visited)
+ ss = append(ss, fmt.Sprintf("%s: %s", name, s))
+ }
+ s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+ if conf.printType {
+ return v.Type().String() + s
+ }
+ return s
+ default:
+ panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+ }
+}
+
+func formatString(s string) string {
+ // Use quoted string if it is the same length as a raw string literal.
+ // Otherwise, attempt to use the raw string form.
+ qs := strconv.Quote(s)
+ if len(qs) == 1+len(s)+1 {
+ return qs
+ }
+
+ // Disallow newlines to ensure output is a single line.
+ // Only allow printable runes for readability purposes.
+ rawInvalid := func(r rune) bool {
+ return r == '`' || r == '\n' || !unicode.IsPrint(r)
+ }
+ if strings.IndexFunc(s, rawInvalid) < 0 {
+ return "`" + s + "`"
+ }
+ return qs
+}
+
+func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string {
+ if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") {
+ return fmt.Sprintf("%v(%v)", t, v)
+ }
+ return fmt.Sprintf("%v", v)
+}
+
+func formatPointer(v reflect.Value, conf FormatConfig) string {
+ p := v.Pointer()
+ if !conf.realPointers {
+ p = 0 // For deterministic printing purposes
+ }
+ s := formatHex(uint64(p))
+ if conf.printType {
+ return fmt.Sprintf("(%v)(%s)", v.Type(), s)
+ }
+ return s
+}
+
+func formatHex(u uint64) string {
+ var f string
+ switch {
+ case u <= 0xff:
+ f = "0x%02x"
+ case u <= 0xffff:
+ f = "0x%04x"
+ case u <= 0xffffff:
+ f = "0x%06x"
+ case u <= 0xffffffff:
+ f = "0x%08x"
+ case u <= 0xffffffffff:
+ f = "0x%010x"
+ case u <= 0xffffffffffff:
+ f = "0x%012x"
+ case u <= 0xffffffffffffff:
+ f = "0x%014x"
+ case u <= 0xffffffffffffffff:
+ f = "0x%016x"
+ }
+ return fmt.Sprintf(f, u)
+}
+
+// insertPointer inserts p into m, allocating m if necessary.
+func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool {
+ if m == nil {
+ m = make(map[uintptr]bool)
+ }
+ m[p] = true
+ return m
+}
+
+// isZero reports whether v is the zero value.
+// This does not rely on Interface and so can be used on unexported fields.
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return v.Bool() == false
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() == 0
+ case reflect.String:
+ return v.String() == ""
+ case reflect.UnsafePointer:
+ return v.Pointer() == 0
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ return v.IsNil()
+ case reflect.Array:
+ for i := 0; i < v.Len(); i++ {
+ if !isZero(v.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
new file mode 100644
index 0000000000..fe8aa27a07
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -0,0 +1,111 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package value
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+)
+
+// SortKeys sorts a list of map keys, deduplicating keys if necessary.
+// The type of each value must be comparable.
+func SortKeys(vs []reflect.Value) []reflect.Value {
+ if len(vs) == 0 {
+ return vs
+ }
+
+ // Sort the map keys.
+ sort.Sort(valueSorter(vs))
+
+ // Deduplicate keys (fails for NaNs).
+ vs2 := vs[:1]
+ for _, v := range vs[1:] {
+ if isLess(vs2[len(vs2)-1], v) {
+ vs2 = append(vs2, v)
+ }
+ }
+ return vs2
+}
+
+// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above.
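+//
+// For example (an illustrative sketch, not part of the upstream comment),
+// SortKeys enables deterministic iteration over a map:
+//
+//   m := reflect.ValueOf(map[string]int{"b": 2, "a": 1})
+//   for _, k := range SortKeys(m.MapKeys()) {
+//       _ = m.MapIndex(k) // visits key "a", then key "b"
+//   }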
+type valueSorter []reflect.Value + +func (vs valueSorter) Len() int { return len(vs) } +func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) } +func (vs valueSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 0000000000..91d4b066e0 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,453 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "runtime" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. 
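+//
+// For example (an illustrative sketch, not part of the upstream comment;
+// assumes the standard math package), a filtered Comparer that treats
+// float64 values within 1e-9 of each other as equal:
+//
+//   opt := FilterValues(func(x, y float64) bool {
+//       return !math.IsNaN(x) && !math.IsNaN(y)
+//   }, Comparer(func(x, y float64) bool {
+//       return math.Abs(x-y) < 1e-9
+//   }))
+//   Equal(1.0, 1.0+1e-12, opt) // true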
+type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | invalid | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. + apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | invalid | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. +type Options []Option + +func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, vx, vy, t); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case invalid: + out = invalid{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case invalid: + // Keep invalid + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. 
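+//
+// For example (an illustrative sketch, not part of the upstream comment),
+// this ignores any struct field named "Internal" anywhere in the value tree:
+//
+//   opt := FilterPath(func(p Path) bool {
+//       sf, ok := p.Last().(StructField)
+//       return ok && sf.Name() == "Internal"
+//   }, Ignore())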
+func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, vx, vy, t) + } + return nil +} + +func (f pathFilter) String() string { + fn := getFuncName(reflect.ValueOf(f.fnc).Pointer()) + return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If the type of the values is not +// assignable to T, then this filter implicitly returns false. +// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return invalid{} + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, vx, vy, t) + } + return nil +} + +func (f valuesFilter) String() string { + fn := getFuncName(f.fnc.Pointer()) + return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} } +func (ignore) apply(_ *state, _, _ reflect.Value) { return } +func (ignore) String() string { return "Ignore()" } + +// invalid is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields. +type invalid struct{ core } + +func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} } +func (invalid) apply(s *state, _, _ reflect.Value) { + const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) +} + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. 
+// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. +// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep. If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = "λ" // Lambda-symbol as place-holder for anonymous transformer + } + if !isValid(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(*transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + // Update path before calling the Transformer so that dynamic checks + // will use the updated path. + s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr}) + defer s.curPath.pop() + + vx = s.callTRFunc(tr.fnc, vx) + vy = s.callTRFunc(tr.fnc, vy) + s.compareAny(vx, vy) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer())) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
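+//
+// For example (an illustrative sketch, not part of the upstream comment;
+// assumes the standard strings package), a case-insensitive string comparer:
+//
+//   Comparer(func(x, y string) bool {
+//       return strings.EqualFold(x, y)
+//   })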
+// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, vx, vy) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer())) +} + +// AllowUnexported returns an Option that forcibly allows operations on +// unexported fields in certain structs, which are specified by passing in a +// value of each struct type. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// For some cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func AllowUnexported(types ...interface{}) Option { + if !supportAllowUnexported { + panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") + } + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return visibleStructs(m) +} + +type visibleStructs map[reflect.Type]bool + +func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { + panic("not implemented") +} + +// reporter is an Option that configures how differences are reported. +type reporter interface { + // TODO: Not exported yet. + // + // Perhaps add PushStep and PopStep and change Report to only accept + // a PathStep instead of the full-path? Adding a PushStep and PopStep makes + // it clear that we are traversing the value tree in a depth-first-search + // manner, which has an effect on how values are printed. 
+
+ Option
+
+ // Report is called for every comparison made and will be provided with
+ // the two values being compared, the equality result, and the
+ // current path in the value tree. It is possible for x or y to be an
+ // invalid reflect.Value if one of the values is non-existent,
+ // which is possible with maps and slices.
+ Report(x, y reflect.Value, eq bool, p Path)
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+ switch opts := flattenOptions(nil, Options{src}); len(opts) {
+ case 0:
+ return nil
+ case 1:
+ return opts[0]
+ default:
+ return opts
+ }
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+ for _, opt := range src {
+ switch opt := opt.(type) {
+ case nil:
+ continue
+ case Options:
+ dst = flattenOptions(dst, opt)
+ case coreOption:
+ dst = append(dst, opt)
+ default:
+ panic(fmt.Sprintf("invalid option type: %T", opt))
+ }
+ }
+ return dst
+}
+
+// getFuncName returns a short function name from the pointer.
+// The string parsing logic works up until Go1.9.
+func getFuncName(p uintptr) string {
+ fnc := runtime.FuncForPC(p)
+ if fnc == nil {
+ return "<unknown>"
+ }
+ name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
+ if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
+ // Strip the package name from method name.
+ name = strings.TrimSuffix(name, ")-fm")
+ name = strings.TrimSuffix(name, ")·fm")
+ if i := strings.LastIndexByte(name, '('); i >= 0 {
+ methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
+ if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
+ methodName = methodName[j+1:] // E.g., "myfunc"
+ }
+ name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
+ }
+ }
+ if i := strings.LastIndexByte(name, '/'); i >= 0 {
+ // Strip the package name.
+ name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
+ }
+ return name
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
new file mode 100644
index 0000000000..c08a3cf80d
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -0,0 +1,309 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type (
+ // Path is a list of PathSteps describing the sequence of operations to get
+ // from some root type to the current position in the value tree.
+ // The first Path element is always an operation-less PathStep that exists
+ // simply to identify the initial type.
+ //
+ // When traversing structs with embedded structs, the embedded struct will
+ // always be accessed as a field before traversing the fields of the
+ // embedded struct themselves. That is, an exported field from the
+ // embedded struct will never be accessed directly from the parent struct.
+ Path []PathStep
+
+ // PathStep is a union-type for specific operations to traverse
+ // a value's tree structure. Users of this package never need to implement
+ // these interfaces; values of these types will be returned by this package.
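+ //
+ // For example (an editorial illustration, not upstream text), a
+ // difference found at x.Foo[5].Bar would be described by a Path whose
+ // steps include a StructField ("Foo"), a SliceIndex (5), and another
+ // StructField ("Bar").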
+ PathStep interface { + String() string + Type() reflect.Type // Resulting type after performing the path step + isPathStep() + } + + // SliceIndex is an index operation on a slice or array at some index Key. + SliceIndex interface { + PathStep + Key() int // May return -1 if in a split state + + // SplitKeys returns the indexes for indexing into slices in the + // x and y values, respectively. These indexes may differ due to the + // insertion or removal of an element in one of the slices, causing + // all of the indexes to be shifted. If an index is -1, then that + // indicates that the element does not exist in the associated slice. + // + // Key is guaranteed to return -1 if and only if the indexes returned + // by SplitKeys are not the same. SplitKeys will never return -1 for + // both indexes. + SplitKeys() (x int, y int) + + isSliceIndex() + } + // MapIndex is an index operation on a map at some index Key. + MapIndex interface { + PathStep + Key() reflect.Value + isMapIndex() + } + // TypeAssertion represents a type assertion on an interface. + TypeAssertion interface { + PathStep + isTypeAssertion() + } + // StructField represents a struct field access on a field called Name. + StructField interface { + PathStep + Name() string + Index() int + isStructField() + } + // Indirect represents pointer indirection on the parent type. + Indirect interface { + PathStep + isIndirect() + } + // Transform is a transformation from the parent type to the current type. + Transform interface { + PathStep + Name() string + Func() reflect.Value + + // Option returns the originally constructed Transformer option. + // The == operator can be used to detect the exact option used. + Option() Option + + isTransform() + } +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(*structField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. 
+// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case *indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case *indirect: + continue // Next step is indirection, so let them batch up + case *structField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case *transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + case *typeAssertion: + // As a special-case, elide type assertions on anonymous types + // since they are typically generated dynamically and can be very + // verbose. For example, some transforms return interface{} because + // of Go's lack of generics, but typically take in and return the + // exact same concrete type. + if s.Type().PkgPath() == "" { + continue + } + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type ( + pathStep struct { + typ reflect.Type + } + + sliceIndex struct { + pathStep + xkey, ykey int + } + mapIndex struct { + pathStep + key reflect.Value + } + typeAssertion struct { + pathStep + } + structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + force bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information + } + indirect struct { + pathStep + } + transform struct { + pathStep + trans *transformer + } +) + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +func (si sliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] 
means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} +func (mi mapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } +func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } +func (sf structField) String() string { return fmt.Sprintf(".%s", sf.name) } +func (in indirect) String() string { return "*" } +func (tf transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +func (si sliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} +func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey } +func (mi mapIndex) Key() reflect.Value { return mi.key } +func (sf structField) Name() string { return sf.name } +func (sf structField) Index() int { return sf.idx } +func (tf transform) Name() string { return tf.trans.name } +func (tf transform) Func() reflect.Value { return tf.trans.fnc } +func (tf transform) Option() Option { return tf.trans } + +func (pathStep) isPathStep() {} +func (sliceIndex) isSliceIndex() {} +func (mapIndex) isMapIndex() {} +func (typeAssertion) isTypeAssertion() {} +func (structField) isStructField() {} +func (indirect) isIndirect() {} +func (transform) isTransform() {} + +var ( + _ SliceIndex = sliceIndex{} + _ MapIndex = mapIndex{} + _ TypeAssertion = typeAssertion{} + _ StructField = structField{} + _ Indirect = indirect{} + _ Transform = transform{} + + _ PathStep = sliceIndex{} + _ PathStep = mapIndex{} + _ PathStep = typeAssertion{} + _ PathStep = structField{} + _ PathStep = indirect{} + _ PathStep = transform{} +) + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} + +// isValid reports whether the identifier is valid. +// Empty and underscore-only strings are not valid. +func isValid(id string) bool { + ok := id != "" && id != "_" + for j, c := range id { + ok = ok && (j > 0 || !unicode.IsDigit(c)) + ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c)) + } + return ok +} diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go new file mode 100644 index 0000000000..20e9f18e0d --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/reporter.go @@ -0,0 +1,53 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/value" +) + +type defaultReporter struct { + Option + diffs []string // List of differences, possibly truncated + ndiffs int // Total number of differences + nbytes int // Number of bytes in diffs + nlines int // Number of lines in diffs +} + +var _ reporter = (*defaultReporter)(nil) + +func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) { + if eq { + return // Ignore equal results + } + const maxBytes = 4096 + const maxLines = 256 + r.ndiffs++ + if r.nbytes < maxBytes && r.nlines < maxLines { + sx := value.Format(x, value.FormatConfig{UseStringer: true}) + sy := value.Format(y, value.FormatConfig{UseStringer: true}) + if sx == sy { + // Unhelpful output, so use more exact formatting. 
+ sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true}) + sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true}) + } + s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy) + r.diffs = append(r.diffs, s) + r.nbytes += len(s) + r.nlines += strings.Count(s, "\n") + } +} + +func (r *defaultReporter) String() string { + s := strings.Join(r.diffs, "") + if r.ndiffs == len(r.diffs) { + return s + } + return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs)) +} diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go b/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go new file mode 100644 index 0000000000..d1518eb3a8 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go @@ -0,0 +1,15 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego appengine js + +package cmp + +import "reflect" + +const supportAllowUnexported = false + +func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value { + panic("unsafeRetrieveField is not implemented") +} diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go b/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go new file mode 100644 index 0000000000..579b65507f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go @@ -0,0 +1,23 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego,!appengine,!js + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportAllowUnexported = true + +// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct +// such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. +func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value { + return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go index 8f7dd36b70..463084a702 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -1,10 +1,10 @@ package runtime import ( + "context" "fmt" "net/http" "strings" - "context" "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go index 223a12ecc4..f16a84ad38 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -144,16 +144,7 @@ func MustPattern(p Pattern, err error) Pattern { // If otherwise, the function returns an error. func (p Pattern) Match(components []string, verb string) (map[string]string, error) { if p.verb != verb { - if p.verb != "" { - return nil, ErrNotMatch - } - if len(components) == 0 { - components = []string{":" + verb} - } else { - components = append([]string{}, components...) 
- components[len(components)-1] += ":" + verb - } - verb = "" + return nil, ErrNotMatch } var pos int diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go index 7d8a57c280..8d306bf513 100644 --- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -26,6 +26,7 @@ func DefaultPooledTransport() *http.Transport { DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, + DualStack: true, }).DialContext, MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod new file mode 100644 index 0000000000..310f07569f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 0000000000..7eda3777f3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,43 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. +func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + next.ServeHTTP(w, r) + return + }) +} diff --git a/vendor/github.com/hashicorp/go-getter/.travis.yml b/vendor/github.com/hashicorp/go-getter/.travis.yml index da804c2906..4fe9176aab 100644 --- a/vendor/github.com/hashicorp/go-getter/.travis.yml +++ b/vendor/github.com/hashicorp/go-getter/.travis.yml @@ -9,15 +9,16 @@ addons: language: go +os: + - linux + - osx + go: - - 1.8.x - - 1.9.x - - master + - "1.11.x" + +before_script: + - go build ./cmd/go-getter branches: only: - master - -matrix: - allow_failures: - - go: master diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md index 40ace74d8a..d9f8f67fdb 100644 --- a/vendor/github.com/hashicorp/go-getter/README.md +++ b/vendor/github.com/hashicorp/go-getter/README.md @@ -97,7 +97,7 @@ would download the given HTTP URL using the Git protocol. Forced protocols will also override any detectors. -In the absense of a forced protocol, detectors may be run on the URL, transforming +In the absence of a forced protocol, detectors may be run on the URL, transforming the protocol anyways. The above example would've used the Git protocol either way since the Git detector would've detected it was a GitHub URL. @@ -155,20 +155,44 @@ For file downloads of any protocol, go-getter can automatically verify a checksum for you. Note that checksumming only works for downloading files, not directories, but checksumming will work for any protocol. 
-To checksum a file, append a `checksum` query parameter to the URL.
-The paramter value should be in the format of `type:value`, where
-type is "md5", "sha1", "sha256", or "sha512". The "value" should be
-the actual checksum value. go-getter will parse out this query parameter
-automatically and use it to verify the checksum. An example URL
-is shown below:
+To checksum a file, append a `checksum` query parameter to the URL. go-getter
+will parse out this query parameter automatically and use it to verify the
+checksum. The parameter value can be in the format of `type:value` or just
+`value`, where type is "md5", "sha1", "sha256", "sha512", or "file". The
+"value" should be the actual checksum value, or the download URL for "file".
+When the `type` part is omitted, the type will be guessed based on the length
+of the checksum string. Examples:
 
 ```
 ./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
 ```
 
+```
+./foo.txt?checksum=b7d96c89d09d9e204f5fedc4d5d55b21
+```
+
+```
+./foo.txt?checksum=file:./foo.txt.sha256sum
+```
+
+When checksumming from a file - e.g. with `checksum=file:url` - go-getter will
+fetch the file linked in the URL after `file:` using the same configuration.
+For example, given `file:http://releases.ubuntu.com/cosmic/MD5SUMS`, go-getter
+will download the checksum file from that URL using the HTTP protocol. All
+protocols supported by go-getter can be used. The checksum file will be
+downloaded to a temporary file and then parsed. The destination of the
+temporary file can be changed by setting system-specific environment variables:
+`TMPDIR` on Unix; `TMP`, `TEMP`, or `USERPROFILE` on Windows. See the godoc for
+[os.TempDir](https://golang.org/pkg/os/#TempDir) for more information on
+temporary directory selection. The contents of the checksum file are expected
+to be in BSD or GNU style. Once go-getter is done with the checksum file, it is
+deleted.
+
 The checksum query parameter is never sent to the backend
 protocol implementation. It is used at a higher level by go-getter itself.
 
+If the destination file exists and the checksums match, the download
+will be skipped.
+
 ### Unarchiving
 
 go-getter will automatically unarchive files into a file or directory
@@ -215,11 +239,12 @@ from the URL before going to the final protocol downloader.
 
 ## Protocol-Specific Options
 
-This section documents the protocol-specific options that can be specified
-for go-getter. These options should be appended to the input as normal query
-parameters. Depending on the usage of go-getter, applications may provide
-alternate ways of inputting options. For example, [Nomad](https://www.nomadproject.io)
-provides a nice options block for specifying options rather than in the URL.
+This section documents the protocol-specific options that can be specified for
+go-getter. These options should be appended to the input as normal query
+parameters ([HTTP headers](#headers) are an exception to this, however).
+Depending on the usage of go-getter, applications may provide alternate ways of
+inputting options. For example, [Nomad](https://www.nomadproject.io) provides a
+nice options block for specifying options rather than in the URL.
 
 ## General (All Protocols)
 
@@ -250,6 +275,9 @@ None
    from a private key file on disk, you would run `base64 -w0 `.
 
    **Note**: Git 2.3+ is required to use this feature.
+
+  * `depth` - The Git clone depth. The provided number specifies the last `n`
+    revisions to clone from the repository (see the sketch below).
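+
+As a minimal sketch (the repository URL below is illustrative, not a real
+module source), the `depth` option is passed through the `Get` helper as a
+normal query parameter:
+
+```go
+package main
+
+import (
+	"log"
+
+	getter "github.com/hashicorp/go-getter"
+)
+
+func main() {
+	// Clone only the most recent revision of an (illustrative) repository.
+	src := "git::https://example.com/repo.git?depth=1"
+	if err := getter.Get("./repo", src); err != nil {
+		log.Fatal(err)
+	}
+}
+```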
### Mercurial (`hg`) @@ -263,6 +291,13 @@ To use HTTP basic authentication with go-getter, simply prepend `username:passwo hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special characters, including the username and password, must be URL encoded. +#### Headers + +Optional request headers can be added by supplying them in a custom +[`HttpGetter`](https://godoc.org/github.com/hashicorp/go-getter#HttpGetter) +(_not_ as query parameters like most other options). These headers will be sent +out on every request the getter in question makes. + ### S3 (`s3`) S3 takes various access configurations in the URL. Note that it will also diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml index ec48d45ec3..1e8718e17e 100644 --- a/vendor/github.com/hashicorp/go-getter/appveyor.yml +++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml @@ -13,4 +13,4 @@ install: go get -d -v -t ./... build_script: -- cmd: go test -v ./... +- cmd: go test ./... diff --git a/vendor/github.com/hashicorp/go-getter/checksum.go b/vendor/github.com/hashicorp/go-getter/checksum.go new file mode 100644 index 0000000000..ef27951b93 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/checksum.go @@ -0,0 +1,291 @@ +package getter + +import ( + "bufio" + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "hash" + "io" + "net/url" + "os" + "path/filepath" + "strings" + + urlhelper "github.com/hashicorp/go-getter/helper/url" +) + +// fileChecksum helps verifying the checksum for a file. +type fileChecksum struct { + Type string + Hash hash.Hash + Value []byte + Filename string +} + +// checksum is a simple method to compute the checksum of a source file +// and compare it to the given expected value. +func (c *fileChecksum) checksum(source string) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("Failed to open file for checksum: %s", err) + } + defer f.Close() + + c.Hash.Reset() + if _, err := io.Copy(c.Hash, f); err != nil { + return fmt.Errorf("Failed to hash: %s", err) + } + + if actual := c.Hash.Sum(nil); !bytes.Equal(actual, c.Value) { + return fmt.Errorf( + "Checksums did not match.\nExpected: %s\nGot: %s", + hex.EncodeToString(c.Value), + hex.EncodeToString(actual)) + } + + return nil +} + +// extractChecksum will return a fileChecksum based on the 'checksum' +// parameter of u. +// ex: +// http://hashicorp.com/terraform?checksum= +// http://hashicorp.com/terraform?checksum=: +// http://hashicorp.com/terraform?checksum=file: +// when checksumming from a file, extractChecksum will go get checksum_url +// in a temporary directory, parse the content of the file then delete it. +// Content of files are expected to be BSD style or GNU style. 
+// +// BSD-style checksum: +// MD5 (file1) = +// MD5 (file2) = +// +// GNU-style: +// file1 +// *file2 +// +// see parseChecksumLine for more detail on checksum file parsing +func (c *Client) extractChecksum(u *url.URL) (*fileChecksum, error) { + q := u.Query() + v := q.Get("checksum") + + if v == "" { + return nil, nil + } + + vs := strings.SplitN(v, ":", 2) + switch len(vs) { + case 2: + break // good + default: + // here, we try to guess the checksum from it's length + // if the type was not passed + return newChecksumFromValue(v, filepath.Base(u.EscapedPath())) + } + + checksumType, checksumValue := vs[0], vs[1] + + switch checksumType { + case "file": + return c.checksumFromFile(checksumValue, u) + default: + return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath())) + } +} + +func newChecksum(checksumValue, filename string) (*fileChecksum, error) { + c := &fileChecksum{ + Filename: filename, + } + var err error + c.Value, err = hex.DecodeString(checksumValue) + if err != nil { + return nil, fmt.Errorf("invalid checksum: %s", err) + } + return c, nil +} + +func newChecksumFromType(checksumType, checksumValue, filename string) (*fileChecksum, error) { + c, err := newChecksum(checksumValue, filename) + if err != nil { + return nil, err + } + + c.Type = strings.ToLower(checksumType) + switch c.Type { + case "md5": + c.Hash = md5.New() + case "sha1": + c.Hash = sha1.New() + case "sha256": + c.Hash = sha256.New() + case "sha512": + c.Hash = sha512.New() + default: + return nil, fmt.Errorf( + "unsupported checksum type: %s", checksumType) + } + + return c, nil +} + +func newChecksumFromValue(checksumValue, filename string) (*fileChecksum, error) { + c, err := newChecksum(checksumValue, filename) + if err != nil { + return nil, err + } + + switch len(c.Value) { + case md5.Size: + c.Hash = md5.New() + c.Type = "md5" + case sha1.Size: + c.Hash = sha1.New() + c.Type = "sha1" + case sha256.Size: + c.Hash = sha256.New() + c.Type = "sha256" + case sha512.Size: + c.Hash = sha512.New() + c.Type = "sha512" + default: + return nil, fmt.Errorf("Unknown type for checksum %s", checksumValue) + } + + return c, nil +} + +// checksumsFromFile will return all the fileChecksums found in file +// +// checksumsFromFile will try to guess the hashing algorithm based on content +// of checksum file +// +// checksumsFromFile will only return checksums for files that match file +// behind src +func (c *Client) checksumFromFile(checksumFile string, src *url.URL) (*fileChecksum, error) { + checksumFileURL, err := urlhelper.Parse(checksumFile) + if err != nil { + return nil, err + } + + tempfile, err := tmpFile("", filepath.Base(checksumFileURL.Path)) + if err != nil { + return nil, err + } + defer os.Remove(tempfile) + + c2 := &Client{ + Ctx: c.Ctx, + Getters: c.Getters, + Decompressors: c.Decompressors, + Detectors: c.Detectors, + Pwd: c.Pwd, + Dir: false, + Src: checksumFile, + Dst: tempfile, + ProgressListener: c.ProgressListener, + } + if err = c2.Get(); err != nil { + return nil, fmt.Errorf( + "Error downloading checksum file: %s", err) + } + + filename := filepath.Base(src.Path) + absPath, err := filepath.Abs(src.Path) + if err != nil { + return nil, err + } + checksumFileDir := filepath.Dir(checksumFileURL.Path) + relpath, err := filepath.Rel(checksumFileDir, absPath) + switch { + case err == nil || + err.Error() == "Rel: can't make "+absPath+" relative to "+checksumFileDir: + // ex: on windows C:\gopath\...\content.txt cannot be relative to \ + // which is okay, may be another 
expected path will work.
+		break
+	default:
+		return nil, err
+	}
+
+	// possible file identifiers:
+	options := []string{
+		filename,       // ubuntu-14.04.1-server-amd64.iso
+		"*" + filename, // *ubuntu-14.04.1-server-amd64.iso  Standard checksum
+		"?" + filename, // ?ubuntu-14.04.1-server-amd64.iso  shasum -p
+		relpath,        // dir/ubuntu-14.04.1-server-amd64.iso
+		"./" + relpath, // ./dir/ubuntu-14.04.1-server-amd64.iso
+		absPath,        // fullpath; set if local
+	}
+
+	f, err := os.Open(tempfile)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"Error opening downloaded file: %s", err)
+	}
+	defer f.Close()
+	rd := bufio.NewReader(f)
+	for {
+		line, err := rd.ReadString('\n')
+		if err != nil {
+			if err != io.EOF {
+				return nil, fmt.Errorf(
+					"Error reading checksum file: %s", err)
+			}
+			break
+		}
+		checksum, err := parseChecksumLine(line)
+		if err != nil || checksum == nil {
+			continue
+		}
+		if checksum.Filename == "" {
+			// filename is unknown; try this checksum
+			return checksum, nil
+		}
+		// make sure the checksum is for the right file
+		for _, option := range options {
+			if option != "" && checksum.Filename == option {
+				// any checksum will work so we return the first one
+				return checksum, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf("no checksum found in: %s", checksumFile)
+}
+
+// parseChecksumLine takes a line from a checksum file and returns the
+// checksum type, checksum value, and filename. parseChecksumLine guesses
+// the style of the checksum (BSD vs GNU) by splitting the line and counting
+// its parts.
+// For GNU-style sums, which do not name an algorithm, parseChecksumLine
+// guesses the hashing algorithm by checking the length of the checksum.
+func parseChecksumLine(line string) (*fileChecksum, error) {
+	parts := strings.Fields(line)
+
+	switch len(parts) {
+	case 4:
+		// BSD-style checksum:
+		//  MD5 (file1) = <checksum>
+		//  MD5 (file2) = <checksum>
+		if len(parts[1]) <= 2 ||
+			parts[1][0] != '(' || parts[1][len(parts[1])-1] != ')' {
+			return nil, fmt.Errorf(
+				"Unexpected BSD-style-checksum filename format: %s", line)
+		}
+		filename := parts[1][1 : len(parts[1])-1]
+		return newChecksumFromType(parts[0], parts[3], filename)
+	case 2:
+		// GNU-style:
+		//  <checksum>  file1
+		//  <checksum> *file2
+		return newChecksumFromValue(parts[0], parts[1])
+	case 0:
+		return nil, nil // empty line
+	default:
+		return newChecksumFromValue(parts[0], "")
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
index 6e5ab82645..007a78ba7c 100644
--- a/vendor/github.com/hashicorp/go-getter/client.go
+++ b/vendor/github.com/hashicorp/go-getter/client.go
@@ -1,15 +1,8 @@
 package getter
 
 import (
-	"bytes"
-	"crypto/md5"
-	"crypto/sha1"
-	"crypto/sha256"
-	"crypto/sha512"
-	"encoding/hex"
+	"context"
 	"fmt"
-	"hash"
-	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -17,6 +10,7 @@ import (
 	"strings"
 
 	urlhelper "github.com/hashicorp/go-getter/helper/url"
+	safetemp "github.com/hashicorp/go-safetemp"
 )
 
 // Client is a client for downloading things.
@@ -25,6 +19,9 @@ import (
 // Using a client directly allows more fine-grained control over how downloading
 // is done, as well as customizing the protocols supported.
 type Client struct {
+	// Ctx for cancellation
+	Ctx context.Context
+
 	// Src is the source URL to get.
 	//
 	// Dst is the path to save the downloaded thing as. If Dir is set to
@@ -61,10 +58,20 @@ type Client struct {
 	//
 	// WARNING: deprecated. If Mode is set, that will take precedence.
 	Dir bool
+
+	// ProgressListener allows tracking file downloads.
+	// By default a no-op progress listener is used.
+ ProgressListener ProgressTracker + + Options []ClientOption } // Get downloads the configured source to the destination. func (c *Client) Get() error { + if err := c.Configure(c.Options...); err != nil { + return err + } + // Store this locally since there are cases we swap this mode := c.Mode if mode == ClientModeInvalid { @@ -75,18 +82,7 @@ func (c *Client) Get() error { } } - // Default decompressor value - decompressors := c.Decompressors - if decompressors == nil { - decompressors = Decompressors - } - - // Detect the URL. This is safe if it is already detected. - detectors := c.Detectors - if detectors == nil { - detectors = Detectors - } - src, err := Detect(c.Src, c.Pwd, detectors) + src, err := Detect(c.Src, c.Pwd, c.Detectors) if err != nil { return err } @@ -100,17 +96,14 @@ func (c *Client) Get() error { dst := c.Dst src, subDir := SourceDirSubdir(src) if subDir != "" { - tmpDir, err := ioutil.TempDir("", "tf") + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(tmpDir); err != nil { - return err - } - defer os.RemoveAll(tmpDir) + defer tdcloser.Close() realDst = dst - dst = tmpDir + dst = td } u, err := urlhelper.Parse(src) @@ -121,12 +114,7 @@ func (c *Client) Get() error { force = u.Scheme } - getters := c.Getters - if getters == nil { - getters = Getters - } - - g, ok := getters[force] + g, ok := c.Getters[force] if !ok { return fmt.Errorf( "download not supported for scheme '%s'", force) @@ -152,7 +140,7 @@ func (c *Client) Get() error { if archiveV == "" { // We don't appear to... but is it part of the filename? matchingLen := 0 - for k, _ := range decompressors { + for k := range c.Decompressors { if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen { archiveV = k matchingLen = len(k) @@ -165,7 +153,7 @@ func (c *Client) Get() error { // real path. var decompressDst string var decompressDir bool - decompressor := decompressors[archiveV] + decompressor := c.Decompressors[archiveV] if decompressor != nil { // Create a temporary directory to store our archive. We delete // this at the end of everything. @@ -184,44 +172,16 @@ func (c *Client) Get() error { mode = ClientModeFile } - // Determine if we have a checksum - var checksumHash hash.Hash - var checksumValue []byte - if v := q.Get("checksum"); v != "" { - // Delete the query parameter if we have it. - q.Del("checksum") - u.RawQuery = q.Encode() - - // Determine the checksum hash type - checksumType := "" - idx := strings.Index(v, ":") - if idx > -1 { - checksumType = v[:idx] - } - switch checksumType { - case "md5": - checksumHash = md5.New() - case "sha1": - checksumHash = sha1.New() - case "sha256": - checksumHash = sha256.New() - case "sha512": - checksumHash = sha512.New() - default: - return fmt.Errorf( - "unsupported checksum type: %s", checksumType) - } - - // Get the remainder of the value and parse it into bytes - b, err := hex.DecodeString(v[idx+1:]) - if err != nil { - return fmt.Errorf("invalid checksum: %s", err) - } - - // Set our value - checksumValue = b + // Determine checksum if we have one + checksum, err := c.extractChecksum(u) + if err != nil { + return fmt.Errorf("invalid checksum: %s", err) } + // Delete the query parameter if we have it. + q.Del("checksum") + u.RawQuery = q.Encode() + if mode == ClientModeAny { // Ask the getter which client mode to use mode, err = g.ClientMode(u) @@ -250,15 +210,24 @@ func (c *Client) Get() error { // If we're not downloading a directory, then just download the file // and return. 
if mode == ClientModeFile { - err := g.GetFile(dst, u) - if err != nil { - return err + getFile := true + if checksum != nil { + if err := checksum.checksum(dst); err == nil { + // don't get the file if the checksum of dst is correct + getFile = false + } } - - if checksumHash != nil { - if err := checksum(dst, checksumHash, checksumValue); err != nil { + if getFile { + err := g.GetFile(dst, u) + if err != nil { return err } + + if checksum != nil { + if err := checksum.checksum(dst); err != nil { + return err + } + } } if decompressor != nil { @@ -293,7 +262,7 @@ func (c *Client) Get() error { if decompressor == nil { // If we're getting a directory, then this is an error. You cannot // checksum a directory. TODO: test - if checksumHash != nil { + if checksum != nil { return fmt.Errorf( "checksum cannot be specified for directory download") } @@ -322,30 +291,7 @@ func (c *Client) Get() error { return err } - return copyDir(realDst, subDir, false) - } - - return nil -} - -// checksum is a simple method to compute the checksum of a source file -// and compare it to the given expected value. -func checksum(source string, h hash.Hash, v []byte) error { - f, err := os.Open(source) - if err != nil { - return fmt.Errorf("Failed to open file for checksum: %s", err) - } - defer f.Close() - - if _, err := io.Copy(h, f); err != nil { - return fmt.Errorf("Failed to hash: %s", err) - } - - if actual := h.Sum(nil); !bytes.Equal(actual, v) { - return fmt.Errorf( - "Checksums did not match.\nExpected: %s\nGot: %s", - hex.EncodeToString(v), - hex.EncodeToString(actual)) + return copyDir(c.Ctx, realDst, subDir, false) } return nil diff --git a/vendor/github.com/hashicorp/go-getter/client_option.go b/vendor/github.com/hashicorp/go-getter/client_option.go new file mode 100644 index 0000000000..c1ee413b05 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/client_option.go @@ -0,0 +1,46 @@ +package getter + +import "context" + +// A ClientOption allows to configure a client +type ClientOption func(*Client) error + +// Configure configures a client with options. +func (c *Client) Configure(opts ...ClientOption) error { + if c.Ctx == nil { + c.Ctx = context.Background() + } + c.Options = opts + for _, opt := range opts { + err := opt(c) + if err != nil { + return err + } + } + // Default decompressor values + if c.Decompressors == nil { + c.Decompressors = Decompressors + } + // Default detector values + if c.Detectors == nil { + c.Detectors = Detectors + } + // Default getter values + if c.Getters == nil { + c.Getters = Getters + } + + for _, getter := range c.Getters { + getter.SetClient(c) + } + return nil +} + +// WithContext allows to pass a context to operation +// in order to be able to cancel a download in progress. +func WithContext(ctx context.Context) func(*Client) error { + return func(c *Client) error { + c.Ctx = ctx + return nil + } +} diff --git a/vendor/github.com/hashicorp/go-getter/client_option_progress.go b/vendor/github.com/hashicorp/go-getter/client_option_progress.go new file mode 100644 index 0000000000..9b185f71de --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/client_option_progress.go @@ -0,0 +1,38 @@ +package getter + +import ( + "io" +) + +// WithProgress allows for a user to track +// the progress of a download. +// For example by displaying a progress bar with +// current download. +// Not all getters have progress support yet. 
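+//
+// A minimal sketch of wiring it up (MyProgressBar is a hypothetical
+// ProgressTracker implementation, not part of this package):
+//
+//	err := getter.GetFile(dst, src, getter.WithProgress(&MyProgressBar{}))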
+func WithProgress(pl ProgressTracker) func(*Client) error { + return func(c *Client) error { + c.ProgressListener = pl + return nil + } +} + +// ProgressTracker allows to track the progress of downloads. +type ProgressTracker interface { + // TrackProgress should be called when + // a new object is being downloaded. + // src is the location the file is + // downloaded from. + // currentSize is the current size of + // the file in case it is a partial + // download. + // totalSize is the total size in bytes, + // size can be zero if the file size + // is not known. + // stream is the file being downloaded, every + // written byte will add up to processed size. + // + // TrackProgress returns a ReadCloser that wraps the + // download in progress ( stream ). + // When the download is finished, body shall be closed. + TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser) +} diff --git a/vendor/github.com/hashicorp/go-getter/common.go b/vendor/github.com/hashicorp/go-getter/common.go new file mode 100644 index 0000000000..d2afd8ad88 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/common.go @@ -0,0 +1,14 @@ +package getter + +import ( + "io/ioutil" +) + +func tmpFile(dir, pattern string) (string, error) { + f, err := ioutil.TempFile(dir, pattern) + if err != nil { + return "", err + } + f.Close() + return f.Name(), nil +} diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go index 2f58e8aebe..641fe6d0f1 100644 --- a/vendor/github.com/hashicorp/go-getter/copy_dir.go +++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go @@ -1,7 +1,7 @@ package getter import ( - "io" + "context" "os" "path/filepath" "strings" @@ -11,7 +11,7 @@ import ( // should already exist. // // If ignoreDot is set to true, then dot-prefixed files/folders are ignored. -func copyDir(dst string, src string, ignoreDot bool) error { +func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error { src, err := filepath.EvalSymlinks(src) if err != nil { return err @@ -66,7 +66,7 @@ func copyDir(dst string, src string, ignoreDot bool) error { } defer dstF.Close() - if _, err := io.Copy(dstF, srcF); err != nil { + if _, err := Copy(ctx, dstF, srcF); err != nil { return err } diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go index fc5681d39f..198bb0edd0 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress.go +++ b/vendor/github.com/hashicorp/go-getter/decompress.go @@ -1,7 +1,15 @@ package getter +import ( + "strings" +) + // Decompressor defines the interface that must be implemented to add // support for decompressing a type. +// +// Important: if you're implementing a decompressor, please use the +// containsDotDot helper in this file to ensure that files can't be +// decompressed outside of the specified directory. type Decompressor interface { // Decompress should decompress src to dst. dir specifies whether dst // is a directory or single file. src is guaranteed to be a single file @@ -31,3 +39,20 @@ func init() { "zip": new(ZipDecompressor), } } + +// containsDotDot checks if the filepath value v contains a ".." entry. +// This will check filepath components by splitting along / or \. This +// function is copied directly from the Go net/http implementation. 
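+//
+// For example (illustrative):
+//
+//	containsDotDot("a/../b")  // true
+//	containsDotDot("a/..b/c") // false: "..b" is not a ".." component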
+func containsDotDot(v string) bool { + if !strings.Contains(v, "..") { + return false + } + for _, ent := range strings.FieldsFunc(v, isSlashRune) { + if ent == ".." { + return true + } + } + return false +} + +func isSlashRune(r rune) bool { return r == '/' || r == '\\' } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go index 4c896aae25..b6986a25ae 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tar.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go @@ -6,6 +6,7 @@ import ( "io" "os" "path/filepath" + "time" ) // untar is a shared helper for untarring an archive. The reader should provide @@ -14,6 +15,7 @@ func untar(input io.Reader, dst, src string, dir bool) error { tarR := tar.NewReader(input) done := false dirHdrs := []*tar.Header{} + now := time.Now() for { hdr, err := tarR.Next() if err == io.EOF { @@ -35,6 +37,11 @@ func untar(input io.Reader, dst, src string, dir bool) error { path := dst if dir { + // Disallow parent traversal + if containsDotDot(hdr.Name) { + return fmt.Errorf("entry contains '..': %s", hdr.Name) + } + path = filepath.Join(path, hdr.Name) } @@ -90,17 +97,37 @@ func untar(input io.Reader, dst, src string, dir bool) error { return err } - // Set the access and modification time - if err := os.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + // Set the access and modification time if valid, otherwise default to current time + aTime := now + mTime := now + if hdr.AccessTime.Unix() > 0 { + aTime = hdr.AccessTime + } + if hdr.ModTime.Unix() > 0 { + mTime = hdr.ModTime + } + if err := os.Chtimes(path, aTime, mTime); err != nil { return err } } - // Adding a file or subdirectory changes the mtime of a directory - // We therefore wait until we've extracted everything and then set the mtime and atime attributes + // Perform a final pass over extracted directories to update metadata for _, dirHdr := range dirHdrs { path := filepath.Join(dst, dirHdr.Name) - if err := os.Chtimes(path, dirHdr.AccessTime, dirHdr.ModTime); err != nil { + // Chmod the directory since they might be created before we know the mode flags + if err := os.Chmod(path, dirHdr.FileInfo().Mode()); err != nil { + return err + } + // Set the mtime/atime attributes since they would have been changed during extraction + aTime := now + mTime := now + if dirHdr.AccessTime.Unix() > 0 { + aTime = dirHdr.AccessTime + } + if dirHdr.ModTime.Unix() > 0 { + mTime = dirHdr.ModTime + } + if err := os.Chtimes(path, aTime, mTime); err != nil { return err } } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go index 91cf33d98d..b2f662a89d 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_testing.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go @@ -18,16 +18,18 @@ import ( // TestDecompressCase is a single test case for testing decompressors type TestDecompressCase struct { - Input string // Input is the complete path to the input file - Dir bool // Dir is whether or not we're testing directory mode - Err bool // Err is whether we expect an error or not - DirList []string // DirList is the list of files for Dir mode - FileMD5 string // FileMD5 is the expected MD5 for a single file - Mtime *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode) + Input string // Input is the complete path to the input file + Dir bool // Dir is whether or not we're 
testing directory mode
+	Err     bool       // Err is whether we expect an error or not
+	DirList []string   // DirList is the list of files for Dir mode
+	FileMD5 string     // FileMD5 is the expected MD5 for a single file
+	Mtime   *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode)
 }
 
 // TestDecompressor is a helper function for testing generic decompressors.
 func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) {
+	t.Helper()
+
 	for _, tc := range cases {
 		t.Logf("Testing: %s", tc.Input)
 
@@ -72,9 +74,13 @@
 
 			if tc.Mtime != nil {
 				actual := fi.ModTime()
-				expected := *tc.Mtime
-				if actual != expected {
-					t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String())
+				if tc.Mtime.Unix() > 0 {
+					expected := *tc.Mtime
+					if actual != expected {
+						t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String())
+					}
+				} else if actual.Unix() <= 0 {
+					t.Fatalf("err %s: expected mtime to be > 0, got '%s'", tc.Input, actual.String())
 				}
 			}
 
@@ -103,10 +109,15 @@
 					t.Fatalf("err: %s", err)
 				}
 				actual := fi.ModTime()
-				expected := *tc.Mtime
-				if actual != expected {
-					t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String())
+				if tc.Mtime.Unix() > 0 {
+					expected := *tc.Mtime
+					if actual != expected {
+						t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String())
+					}
+				} else if actual.Unix() <= 0 {
+					t.Fatalf("err %s: expected mtime to be > 0, got '%s'", tc.Input, actual.String())
 				}
+
 			}
 		}
 	}()
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
index a065c076ff..b0e70cac35 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress_zip.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
@@ -42,6 +42,11 @@ func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
 	for _, f := range zipR.File {
 		path := dst
 		if dir {
+			// Disallow parent traversal
+			if containsDotDot(f.Name) {
+				return fmt.Errorf("entry contains '..': %s", f.Name)
+			}
+
 			path = filepath.Join(path, f.Name)
 		}
 
diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go
index c3695510b3..1485aaa974 100644
--- a/vendor/github.com/hashicorp/go-getter/detect.go
+++ b/vendor/github.com/hashicorp/go-getter/detect.go
@@ -23,6 +23,7 @@ var Detectors []Detector
 
 func init() {
 	Detectors = []Detector{
 		new(GitHubDetector),
+		new(GitDetector),
 		new(BitBucketDetector),
 		new(S3Detector),
 		new(FileDetector),
diff --git a/vendor/github.com/hashicorp/go-getter/detect_git.go b/vendor/github.com/hashicorp/go-getter/detect_git.go
new file mode 100644
index 0000000000..eeb8a04c5e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_git.go
@@ -0,0 +1,26 @@
+package getter
+
+// GitDetector implements Detector to detect Git SSH URLs such as
+// git@host.com:dir1/dir2 and convert them to proper URLs.
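+//
+// For example (illustrative), the SCP-like form
+//
+//	git@host.com:dir1/dir2
+//
+// is rewritten to
+//
+//	git::ssh://git@host.com/dir1/dir2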
+type GitDetector struct{} + +func (d *GitDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + u, err := detectSSH(src) + if err != nil { + return "", true, err + } + if u == nil { + return "", false, nil + } + + // We require the username to be "git" to assume that this is a Git URL + if u.User.Username() != "git" { + return "", false, nil + } + + return "git::" + u.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go index c084ad9acb..4bf4daf238 100644 --- a/vendor/github.com/hashicorp/go-getter/detect_github.go +++ b/vendor/github.com/hashicorp/go-getter/detect_github.go @@ -17,8 +17,6 @@ func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { if strings.HasPrefix(src, "github.com/") { return d.detectHTTP(src) - } else if strings.HasPrefix(src, "git@github.com:") { - return d.detectSSH(src) } return "", false, nil @@ -47,27 +45,3 @@ func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { return "git::" + url.String(), true, nil } - -func (d *GitHubDetector) detectSSH(src string) (string, bool, error) { - idx := strings.Index(src, ":") - qidx := strings.Index(src, "?") - if qidx == -1 { - qidx = len(src) - } - - var u url.URL - u.Scheme = "ssh" - u.User = url.User("git") - u.Host = "github.com" - u.Path = src[idx+1 : qidx] - if qidx < len(src) { - q, err := url.ParseQuery(src[qidx+1:]) - if err != nil { - return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err) - } - - u.RawQuery = q.Encode() - } - - return "git::" + u.String(), true, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_ssh.go b/vendor/github.com/hashicorp/go-getter/detect_ssh.go new file mode 100644 index 0000000000..c0dbe9d475 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_ssh.go @@ -0,0 +1,49 @@ +package getter + +import ( + "fmt" + "net/url" + "regexp" + "strings" +) + +// Note that we do not have an SSH-getter currently so this file serves +// only to hold the detectSSH helper that is used by other detectors. + +// sshPattern matches SCP-like SSH patterns (user@host:path) +var sshPattern = regexp.MustCompile("^(?:([^@]+)@)?([^:]+):/?(.+)$") + +// detectSSH determines if the src string matches an SSH-like URL and +// converts it into a net.URL compatible string. This returns nil if the +// string doesn't match the SSH pattern. +// +// This function is tested indirectly via detect_git_test.go +func detectSSH(src string) (*url.URL, error) { + matched := sshPattern.FindStringSubmatch(src) + if matched == nil { + return nil, nil + } + + user := matched[1] + host := matched[2] + path := matched[3] + qidx := strings.Index(path, "?") + if qidx == -1 { + qidx = len(path) + } + + var u url.URL + u.Scheme = "ssh" + u.User = url.User(user) + u.Host = host + u.Path = path[0:qidx] + if qidx < len(path) { + q, err := url.ParseQuery(path[qidx+1:]) + if err != nil { + return nil, fmt.Errorf("error parsing GitHub SSH URL: %s", err) + } + u.RawQuery = q.Encode() + } + + return &u, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go index e6053d934a..9e79201a4f 100644 --- a/vendor/github.com/hashicorp/go-getter/get.go +++ b/vendor/github.com/hashicorp/go-getter/get.go @@ -41,6 +41,11 @@ type Getter interface { // ClientMode returns the mode based on the given URL. This is used to // allow clients to let the getters decide which mode to use. 
 	ClientMode(*url.URL) (ClientMode, error)
+
+	// SetClient allows a getter to know its client
+	// in order to access the client's Get functions or
+	// progress tracking.
+	SetClient(*Client)
 }
 
 // Getters is the mapping of scheme to the Getter implementation that will
@@ -74,12 +79,12 @@ func init() {
 //
 // src is a URL, whereas dst is always just a file path to a folder. This
 // folder doesn't need to exist. It will be created if it doesn't exist.
-func Get(dst, src string) error {
+func Get(dst, src string, opts ...ClientOption) error {
 	return (&Client{
 		Src:     src,
 		Dst:     dst,
 		Dir:     true,
-		Getters: Getters,
+		Options: opts,
 	}).Get()
 }
 
@@ -89,23 +94,23 @@ func Get(dst, src string) error {
 // dst must be a directory. If src is a file, it will be downloaded
 // into dst with the basename of the URL. If src is a directory or
 // archive, it will be unpacked directly into dst.
-func GetAny(dst, src string) error {
+func GetAny(dst, src string, opts ...ClientOption) error {
 	return (&Client{
 		Src:     src,
 		Dst:     dst,
 		Mode:    ClientModeAny,
-		Getters: Getters,
+		Options: opts,
 	}).Get()
 }
 
 // GetFile downloads the file specified by src into the path specified by
 // dst.
-func GetFile(dst, src string) error {
+func GetFile(dst, src string, opts ...ClientOption) error {
 	return (&Client{
 		Src:     src,
 		Dst:     dst,
 		Dir:     false,
-		Getters: Getters,
+		Options: opts,
 	}).Get()
 }
diff --git a/vendor/github.com/hashicorp/go-getter/get_base.go b/vendor/github.com/hashicorp/go-getter/get_base.go
new file mode 100644
index 0000000000..09e9b6313b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_base.go
@@ -0,0 +1,20 @@
+package getter
+
+import "context"
+
+// getter is our base getter; it groups the
+// fields all getters have in common.
+type getter struct {
+	client *Client
+}
+
+func (g *getter) SetClient(c *Client) { g.client = c }
+
+// Context tries to return the Context from the getter's
+// client; otherwise context.Background() is returned.
+func (g *getter) Context() context.Context {
+	if g == nil || g.client == nil {
+		return context.Background()
+	}
+	return g.client.Ctx
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go
index e5d2d61d7d..78660839a0 100644
--- a/vendor/github.com/hashicorp/go-getter/get_file.go
+++ b/vendor/github.com/hashicorp/go-getter/get_file.go
@@ -8,7 +8,11 @@ import (
 // FileGetter is a Getter implementation that will download a module from
 // a file scheme.
 type FileGetter struct {
-	// Copy, if set to true, will copy data instead of using a symlink
+	getter
+
+	// Copy, if set to true, will copy data instead of using a symlink. If
+	// false, it attempts to symlink to speed up the operation and to lower
+	// disk space usage. If the symlink fails, it may fall back to a copy on Windows.
 	Copy bool
 }
 
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_copy.go b/vendor/github.com/hashicorp/go-getter/get_file_copy.go
new file mode 100644
index 0000000000..d70fb49512
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_copy.go
@@ -0,0 +1,29 @@
+package getter
+
+import (
+	"context"
+	"io"
+)
+
+// readerFunc is syntactic sugar for implementing the io.Reader interface
+// with an ordinary function.
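+//
+// For example (illustrative), a reader that always reports end-of-file:
+//
+//	var r io.Reader = readerFunc(func(p []byte) (int, error) { return 0, io.EOF })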
+type readerFunc func(p []byte) (n int, err error)
+
+func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) }
+
+// Copy is an io.Copy that can be cancelled via a context.Context.
+func Copy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
+	// Copy will call the Reader and Writer interfaces multiple times, in order
+	// to copy chunk by chunk (avoiding loading the whole file into memory).
+	return io.Copy(dst, readerFunc(func(p []byte) (int, error) {
+
+		select {
+		case <-ctx.Done():
+			// context has been canceled
+			// stop process and propagate "context canceled" error
+			return 0, ctx.Err()
+		default:
+			// otherwise just run default io.Reader implementation
+			return src.Read(p)
+		}
+	}))
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
index c89a2d5a43..c3b28ae517 100644
--- a/vendor/github.com/hashicorp/go-getter/get_file_unix.go
+++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
@@ -4,7 +4,6 @@ package getter
 
 import (
 	"fmt"
-	"io"
 	"net/url"
 	"os"
 	"path/filepath"
@@ -50,6 +49,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error {
 }
 
 func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+	ctx := g.Context()
 	path := u.Path
 	if u.RawPath != "" {
 		path = u.RawPath
@@ -98,6 +98,6 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error {
 	}
 	defer dstF.Close()
 
-	_, err = io.Copy(dstF, srcF)
+	_, err = Copy(ctx, dstF, srcF)
 	return err
 }
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
index f87ed0a0be..24f1acb176 100644
--- a/vendor/github.com/hashicorp/go-getter/get_file_windows.go
+++ b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
@@ -4,15 +4,16 @@ package getter
 
 import (
 	"fmt"
-	"io"
 	"net/url"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"strings"
+	"syscall"
 )
 
 func (g *FileGetter) Get(dst string, u *url.URL) error {
+	ctx := g.Context()
 	path := u.Path
 	if u.RawPath != "" {
 		path = u.RawPath
@@ -51,7 +52,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error {
 	sourcePath := toBackslash(path)
 
 	// Use mklink to create a junction point
-	output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
+	output, err := exec.CommandContext(ctx, "cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
 	if err != nil {
 		return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output)
 	}
@@ -60,6 +61,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error {
 }
 
 func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+	ctx := g.Context()
 	path := u.Path
 	if u.RawPath != "" {
 		path = u.RawPath
@@ -92,7 +94,21 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error {
 
 	// If we're not copying, just symlink and we're done
 	if !g.Copy {
-		return os.Symlink(path, dst)
+		if err = os.Symlink(path, dst); err == nil {
+			return err
+		}
+		lerr, ok := err.(*os.LinkError)
+		if !ok {
+			return err
+		}
+		switch lerr.Err {
+		case syscall.ERROR_PRIVILEGE_NOT_HELD:
+			// no symlink privilege; fall back
+			// to a copy to avoid an error.
+ break + default: + return err + } } // Copy @@ -108,7 +124,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { } defer dstF.Close() - _, err = io.Copy(dstF, srcF) + _, err = Copy(ctx, dstF, srcF) return err } diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go index 6f5d9142bc..2ff00d20ff 100644 --- a/vendor/github.com/hashicorp/go-getter/get_git.go +++ b/vendor/github.com/hashicorp/go-getter/get_git.go @@ -1,6 +1,7 @@ package getter import ( + "context" "encoding/base64" "fmt" "io/ioutil" @@ -8,27 +9,34 @@ import ( "os" "os/exec" "path/filepath" + "runtime" + "strconv" "strings" urlhelper "github.com/hashicorp/go-getter/helper/url" - "github.com/hashicorp/go-version" + safetemp "github.com/hashicorp/go-safetemp" + version "github.com/hashicorp/go-version" ) // GitGetter is a Getter implementation that will download a module from // a git repository. -type GitGetter struct{} +type GitGetter struct { + getter +} func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { return ClientModeDir, nil } func (g *GitGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() if _, err := exec.LookPath("git"); err != nil { return fmt.Errorf("git must be available and on the PATH") } // Extract some query parameters we use var ref, sshKey string + var depth int q := u.Query() if len(q) > 0 { ref = q.Get("ref") @@ -37,6 +45,11 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { sshKey = q.Get("sshkey") q.Del("sshkey") + if n, err := strconv.Atoi(q.Get("depth")); err == nil { + depth = n + } + q.Del("depth") + // Copy the URL var newU url.URL = *u u = &newU @@ -77,15 +90,35 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { } } + // For SSH-style URLs, if they use the SCP syntax of host:path, then + // the URL will be mangled. We detect that here and correct the path. + // Example: host:path/bar will turn into host/path/bar + if u.Scheme == "ssh" { + if idx := strings.Index(u.Host, ":"); idx > -1 { + // Copy the URL so we don't modify the input + var newU url.URL = *u + u = &newU + + // Path includes the part after the ':'. + u.Path = u.Host[idx+1:] + u.Path + if u.Path[0] != '/' { + u.Path = "/" + u.Path + } + + // Host trims up to the : + u.Host = u.Host[:idx] + } + } + // Clone or update the repository _, err := os.Stat(dst) if err != nil && !os.IsNotExist(err) { return err } if err == nil { - err = g.update(dst, sshKeyFile, ref) + err = g.update(ctx, dst, sshKeyFile, ref, depth) } else { - err = g.clone(dst, sshKeyFile, u) + err = g.clone(ctx, dst, sshKeyFile, u, depth) } if err != nil { return err @@ -99,19 +132,17 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { } // Lastly, download any/all submodules. - return g.fetchSubmodules(dst, sshKeyFile) + return g.fetchSubmodules(ctx, dst, sshKeyFile, depth) } // GetFile for Git doesn't support updating at this time. It will download // the file every time. func (g *GitGetter) GetFile(dst string, u *url.URL) error { - td, err := ioutil.TempDir("", "getter-git") + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(td); err != nil { - return err - } + defer tdcloser.Close() // Get the filename, and strip the filename from the URL so we can // just get the repository directly. 
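The SCP-style correction above can be exercised in isolation. A minimal
standalone sketch (the helper name and URL values are illustrative, not part
of go-getter):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// fixSCPStyleURL mirrors the correction in GitGetter.Get: for "ssh" URLs
// built from SCP-like strings, part of the path can end up embedded in the
// host ("host:path"), so it is split back out.
func fixSCPStyleURL(u *url.URL) *url.URL {
	if u.Scheme != "ssh" {
		return u
	}
	idx := strings.Index(u.Host, ":")
	if idx < 0 {
		return u
	}
	newU := *u // copy so the input is not modified
	newU.Path = u.Host[idx+1:] + u.Path
	if newU.Path[0] != '/' {
		newU.Path = "/" + newU.Path
	}
	newU.Host = u.Host[:idx]
	return &newU
}

func main() {
	u := &url.URL{Scheme: "ssh", User: url.User("git"), Host: "host.com:dir1", Path: "/dir2"}
	fmt.Println(fixSCPStyleURL(u)) // ssh://git@host.com/dir1/dir2
}
```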
@@ -139,16 +170,23 @@ func (g *GitGetter) checkout(dst string, ref string) error {
 	return getRunCommand(cmd)
 }
 
-func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error {
-	cmd := exec.Command("git", "clone", u.String(), dst)
+func (g *GitGetter) clone(ctx context.Context, dst, sshKeyFile string, u *url.URL, depth int) error {
+	args := []string{"clone"}
+
+	if depth > 0 {
+		args = append(args, "--depth", strconv.Itoa(depth))
+	}
+
+	args = append(args, u.String(), dst)
+	cmd := exec.CommandContext(ctx, "git", args...)
 	setupGitEnv(cmd, sshKeyFile)
 	return getRunCommand(cmd)
 }
 
-func (g *GitGetter) update(dst, sshKeyFile, ref string) error {
+func (g *GitGetter) update(ctx context.Context, dst, sshKeyFile, ref string, depth int) error {
 	// Determine if we're a branch. If we're NOT a branch, then we just
 	// switch to master prior to checking out
-	cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref)
+	cmd := exec.CommandContext(ctx, "git", "show-ref", "-q", "--verify", "refs/heads/"+ref)
 	cmd.Dir = dst
 
 	if getRunCommand(cmd) != nil {
@@ -163,15 +201,24 @@
 		return err
 	}
 
-	cmd = exec.Command("git", "pull", "--ff-only")
+	if depth > 0 {
+		cmd = exec.Command("git", "pull", "--depth", strconv.Itoa(depth), "--ff-only")
+	} else {
+		cmd = exec.Command("git", "pull", "--ff-only")
+	}
+
 	cmd.Dir = dst
 	setupGitEnv(cmd, sshKeyFile)
 	return getRunCommand(cmd)
 }
 
 // fetchSubmodules downloads any configured submodules recursively.
-func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error {
-	cmd := exec.Command("git", "submodule", "update", "--init", "--recursive")
+func (g *GitGetter) fetchSubmodules(ctx context.Context, dst, sshKeyFile string, depth int) error {
+	args := []string{"submodule", "update", "--init", "--recursive"}
+	if depth > 0 {
+		args = append(args, "--depth", strconv.Itoa(depth))
+	}
+	cmd := exec.CommandContext(ctx, "git", args...)
 	cmd.Dir = dst
 	setupGitEnv(cmd, sshKeyFile)
 	return getRunCommand(cmd)
@@ -188,7 +235,7 @@ func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) {
 	// with versions of Go < 1.9.
 	env := os.Environ()
 	for i, v := range env {
-		if strings.HasPrefix(v, gitSSHCommand) {
+		if strings.HasPrefix(v, gitSSHCommand) && len(v) > len(gitSSHCommand) {
 			sshCmd = []string{v}
 
 			env[i], env[len(env)-1] = env[len(env)-1], env[i]
@@ -203,6 +250,9 @@ func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) {
 
 	if sshKeyFile != "" {
 		// We have an SSH key temp file configured, tell ssh about this.
+		if runtime.GOOS == "windows" {
+			sshKeyFile = strings.Replace(sshKeyFile, `\`, `/`, -1)
+		}
 		sshCmd = append(sshCmd, "-i", sshKeyFile)
 	}
 
@@ -225,11 +275,20 @@ func checkGitVersion(min string) error {
 	}
 
 	fields := strings.Fields(string(out))
-	if len(fields) != 3 {
+	if len(fields) < 3 {
 		return fmt.Errorf("Unexpected 'git version' output: %q", string(out))
 	}
+	v := fields[2]
+	if runtime.GOOS == "windows" && strings.Contains(v, ".windows.") {
+		// on windows, git version will return for example:
+		// git version 2.20.1.windows.1
+		// which does not follow the semantic versioning spec
+		// (https://semver.org). We remove that part so that
+		// go-version does not error.
+ v = v[:strings.Index(v, ".windows.")] + } - have, err := version.NewVersion(fields[2]) + have, err := version.NewVersion(v) if err != nil { return err } diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go index 820bdd488e..290649c910 100644 --- a/vendor/github.com/hashicorp/go-getter/get_hg.go +++ b/vendor/github.com/hashicorp/go-getter/get_hg.go @@ -1,8 +1,8 @@ package getter import ( + "context" "fmt" - "io/ioutil" "net/url" "os" "os/exec" @@ -10,17 +10,21 @@ import ( "runtime" urlhelper "github.com/hashicorp/go-getter/helper/url" + safetemp "github.com/hashicorp/go-safetemp" ) // HgGetter is a Getter implementation that will download a module from // a Mercurial repository. -type HgGetter struct{} +type HgGetter struct { + getter +} func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) { return ClientModeDir, nil } func (g *HgGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() if _, err := exec.LookPath("hg"); err != nil { return fmt.Errorf("hg must be available and on the PATH") } @@ -58,19 +62,19 @@ func (g *HgGetter) Get(dst string, u *url.URL) error { return err } - return g.update(dst, newURL, rev) + return g.update(ctx, dst, newURL, rev) } // GetFile for Hg doesn't support updating at this time. It will download // the file every time. func (g *HgGetter) GetFile(dst string, u *url.URL) error { - td, err := ioutil.TempDir("", "getter-hg") + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(td); err != nil { - return err - } + defer tdcloser.Close() // Get the filename, and strip the filename from the URL so we can // just get the repository directly. @@ -93,7 +97,7 @@ func (g *HgGetter) GetFile(dst string, u *url.URL) error { return err } - fg := &FileGetter{Copy: true} + fg := &FileGetter{Copy: true, getter: g.getter} return fg.GetFile(dst, u) } @@ -108,13 +112,13 @@ func (g *HgGetter) pull(dst string, u *url.URL) error { return getRunCommand(cmd) } -func (g *HgGetter) update(dst string, u *url.URL, rev string) error { +func (g *HgGetter) update(ctx context.Context, dst string, u *url.URL, rev string) error { args := []string{"update"} if rev != "" { args = append(args, rev) } - cmd := exec.Command("hg", args...) + cmd := exec.CommandContext(ctx, "hg", args...) cmd.Dir = dst return getRunCommand(cmd) } diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go index 084ecb8564..7c4541c6e9 100644 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ b/vendor/github.com/hashicorp/go-getter/get_http.go @@ -1,15 +1,18 @@ package getter import ( + "context" "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" "path/filepath" + "strconv" "strings" + + safetemp "github.com/hashicorp/go-safetemp" ) // HttpGetter is a Getter implementation that will download from an HTTP @@ -17,7 +20,7 @@ import ( // // For file downloads, HTTP is used directly. // -// The protocol for downloading a directory from an HTTP endpoing is as follows: +// The protocol for downloading a directory from an HTTP endpoint is as follows: // // An HTTP GET request is made to the URL with the additional GET parameter // "terraform-get=1". This lets you handle that scenario specially if you @@ -33,6 +36,8 @@ import ( // formed URL. 
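A quick sketch of the Windows version normalization above, assuming the `github.com/hashicorp/go-version` API already in this package's imports; the sample output string is hypothetical:

```go
package main

import (
	"fmt"
	"strings"

	version "github.com/hashicorp/go-version"
)

func main() {
	out := "git version 2.20.1.windows.1" // sample Windows output
	v := strings.Fields(out)[2]
	if i := strings.Index(v, ".windows."); i > -1 {
		v = v[:i] // drop the non-semver suffix, as checkGitVersion now does
	}
	have, err := version.NewVersion(v)
	fmt.Println(have, err) // 2.20.1 <nil>
}
```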
The shorthand syntax of "github.com/foo/bar" or relative // paths are not allowed. type HttpGetter struct { + getter + // Netrc, if true, will lookup and use auth information found // in the user's netrc file if available. Netrc bool @@ -40,6 +45,12 @@ type HttpGetter struct { // Client is the http.Client to use for Get requests. // This defaults to a cleanhttp.DefaultClient if left unset. Client *http.Client + + // Header contains optional request header fields that should be included + // with every HTTP request. Note that the zero value of this field is nil, + // and as such it needs to be initialized before use, via something like + // make(http.Header). + Header http.Header } func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { @@ -50,6 +61,7 @@ func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { } func (g *HttpGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() // Copy the URL so we can modify it var newU url.URL = *u u = &newU @@ -71,10 +83,17 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { u.RawQuery = q.Encode() // Get the URL - resp, err := g.Client.Get(u.String()) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return err } + + req.Header = g.Header + resp, err := g.Client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode >= 300 { return fmt.Errorf("bad response code: %d", resp.StatusCode) @@ -98,70 +117,121 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { // into a temporary directory, then copy over the proper subdir. source, subDir := SourceDirSubdir(source) if subDir == "" { - return Get(dst, source) + var opts []ClientOption + if g.client != nil { + opts = g.client.Options + } + return Get(dst, source, opts...) } // We have a subdir, time to jump some hoops - return g.getSubdir(dst, source, subDir) + return g.getSubdir(ctx, dst, source, subDir) } -func (g *HttpGetter) GetFile(dst string, u *url.URL) error { +func (g *HttpGetter) GetFile(dst string, src *url.URL) error { + ctx := g.Context() if g.Netrc { // Add auth from netrc if we can - if err := addAuthFromNetrc(u); err != nil { + if err := addAuthFromNetrc(src); err != nil { return err } } - if g.Client == nil { - g.Client = httpClient + // Create all the parent directories if needed + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err } - resp, err := g.Client.Get(u.String()) + f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666)) if err != nil { return err } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return fmt.Errorf("bad response code: %d", resp.StatusCode) + defer f.Close() + + if g.Client == nil { + g.Client = httpClient } - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + var currentFileSize int64 + + // We first make a HEAD request so we can check + // if the server supports range queries. If the server/URL doesn't + // support HEAD requests, we just fall back to GET. + req, err := http.NewRequest("HEAD", src.String(), nil) + if err != nil { return err } + if g.Header != nil { + req.Header = g.Header + } + headResp, err := g.Client.Do(req) + if err == nil && headResp != nil { + headResp.Body.Close() + if headResp.StatusCode == 200 { + // If the HEAD request succeeded, then attempt to set the range + // query if we can. 
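As the new field comment says, `Header` is nil until initialized; a short usage sketch, with a hypothetical bearer token:

```go
package main

import (
	"net/http"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	token := "example-token" // hypothetical credential

	// Header is nil by default, so initialize it before calling Set.
	g := &getter.HttpGetter{Header: make(http.Header)}
	g.Header.Set("Authorization", "Bearer "+token)

	_ = g // g would then be wired into a getter.Client's Getters map
}
```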
+ if headResp.Header.Get("Accept-Ranges") == "bytes" { + if fi, err := f.Stat(); err == nil { + if _, err = f.Seek(0, os.SEEK_END); err == nil { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + currentFileSize = fi.Size() + totalFileSize, _ := strconv.ParseInt(headResp.Header.Get("Content-Length"), 10, 64) + if currentFileSize >= totalFileSize { + // file already present + return nil + } + } + } + } + } + } + req.Method = "GET" - f, err := os.Create(dst) + resp, err := g.Client.Do(req) if err != nil { return err } + switch resp.StatusCode { + case http.StatusOK, http.StatusPartialContent: + // all good + default: + resp.Body.Close() + return fmt.Errorf("bad response code: %d", resp.StatusCode) + } + + body := resp.Body + + if g.client != nil && g.client.ProgressListener != nil { + // track download + fn := filepath.Base(src.EscapedPath()) + body = g.client.ProgressListener.TrackProgress(fn, currentFileSize, currentFileSize+resp.ContentLength, resp.Body) + } + defer body.Close() - n, err := io.Copy(f, resp.Body) + n, err := Copy(ctx, f, body) if err == nil && n < resp.ContentLength { err = io.ErrShortWrite } - if err1 := f.Close(); err == nil { - err = err1 - } return err } // getSubdir downloads the source into the destination, but with // the proper subdir. -func (g *HttpGetter) getSubdir(dst, source, subDir string) error { - // Create a temporary directory to store the full source - td, err := ioutil.TempDir("", "tf") +func (g *HttpGetter) getSubdir(ctx context.Context, dst, source, subDir string) error { + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - defer os.RemoveAll(td) - - // We have to create a subdirectory that doesn't exist for the file - // getter to work. - td = filepath.Join(td, "data") + defer tdcloser.Close() + var opts []ClientOption + if g.client != nil { + opts = g.client.Options + } // Download that into the given directory - if err := Get(td, source); err != nil { + if err := Get(td, source, opts...); err != nil { return err } @@ -187,7 +257,7 @@ func (g *HttpGetter) getSubdir(dst, source, subDir string) error { return err } - return copyDir(dst, sourcePath, false) + return copyDir(ctx, dst, sourcePath, false) } // parseMeta looks for the first meta tag in the given reader that diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go index 882e694dce..e2a98ea284 100644 --- a/vendor/github.com/hashicorp/go-getter/get_mock.go +++ b/vendor/github.com/hashicorp/go-getter/get_mock.go @@ -6,6 +6,8 @@ import ( // MockGetter is an implementation of Getter that can be used for tests. type MockGetter struct { + getter + // Proxy, if set, will be called after recording the calls below. // If it isn't set, then the *Err values will be returned. Proxy Getter diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go index ebb3217417..93eeb0b817 100644 --- a/vendor/github.com/hashicorp/go-getter/get_s3.go +++ b/vendor/github.com/hashicorp/go-getter/get_s3.go @@ -1,8 +1,8 @@ package getter import ( + "context" "fmt" - "io" "net/url" "os" "path/filepath" @@ -18,7 +18,9 @@ import ( // S3Getter is a Getter implementation that will download a module from // a S3 bucket. 
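The resume logic above can be hard to follow inline, so here it is reduced to a standalone sketch against plain `net/http`. The URL and destination path are hypothetical, and the already-complete-file check and progress-listener bookkeeping are omitted:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// resumeDownload probes with HEAD; if the server advertises byte ranges,
// it seeks to the end of the partial file and requests only the rest.
func resumeDownload(url, dst string) error {
	f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	defer f.Close()

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}

	// If HEAD fails we silently fall back to a full GET, as above.
	if head, err := http.Head(url); err == nil {
		head.Body.Close()
		if head.StatusCode == http.StatusOK &&
			head.Header.Get("Accept-Ranges") == "bytes" {
			if fi, err := f.Stat(); err == nil && fi.Size() > 0 {
				f.Seek(0, io.SeekEnd)
				req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size()))
			}
		}
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("bad response code: %d", resp.StatusCode)
	}
	_, err = io.Copy(f, resp.Body)
	return err
}

func main() {
	if err := resumeDownload("https://example.com/large.bin", "/tmp/large.bin"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```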
-type S3Getter struct{} +type S3Getter struct { + getter +} func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { // Parse URL @@ -60,6 +62,8 @@ func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { } func (g *S3Getter) Get(dst string, u *url.URL) error { + ctx := g.Context() + // Parse URL region, bucket, path, _, creds, err := g.parseUrl(u) if err != nil { @@ -124,7 +128,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { } objDst = filepath.Join(dst, objDst) - if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil { + if err := g.getObject(ctx, client, objDst, bucket, objPath, ""); err != nil { return err } } @@ -134,6 +138,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { } func (g *S3Getter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() region, bucket, path, version, creds, err := g.parseUrl(u) if err != nil { return err @@ -142,10 +147,10 @@ func (g *S3Getter) GetFile(dst string, u *url.URL) error { config := g.getAWSConfig(region, u, creds) sess := session.New(config) client := s3.New(sess) - return g.getObject(client, dst, bucket, path, version) + return g.getObject(ctx, client, dst, bucket, path, version) } -func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error { +func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, key, version string) error { req := &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), @@ -170,7 +175,7 @@ func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) er } defer f.Close() - _, err = io.Copy(f, resp.Body) + _, err = Copy(ctx, f, resp.Body) return err } diff --git a/vendor/github.com/hashicorp/go-getter/go.mod b/vendor/github.com/hashicorp/go-getter/go.mod new file mode 100644 index 0000000000..d9e9edd106 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/go.mod @@ -0,0 +1,24 @@ +module github.com/hashicorp/go-getter + +require ( + github.com/aws/aws-sdk-go v1.15.78 + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d + github.com/cheggaaa/pb v1.0.27 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.7.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.0 + github.com/hashicorp/go-safetemp v1.0.0 + github.com/hashicorp/go-version v1.1.0 + github.com/mattn/go-colorable v0.0.9 // indirect + github.com/mattn/go-isatty v0.0.4 // indirect + github.com/mattn/go-runewidth v0.0.4 // indirect + github.com/mitchellh/go-homedir v1.0.0 + github.com/mitchellh/go-testing-interface v1.0.0 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 // indirect + github.com/ulikunitz/xz v0.5.5 + golang.org/x/net v0.0.0-20181114220301-adae6a3d119a // indirect + golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 // indirect + golang.org/x/text v0.3.0 // indirect + gopkg.in/cheggaaa/pb.v1 v1.0.27 // indirect +) diff --git a/vendor/github.com/hashicorp/go-getter/go.sum b/vendor/github.com/hashicorp/go-getter/go.sum new file mode 100644 index 0000000000..79799fb19c --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/go.sum @@ -0,0 +1,44 @@ +github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY= +github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= 
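`Copy(ctx, ...)` replaces `io.Copy` throughout this package. One plausible implementation, shown for illustration rather than as go-getter's actual code, checks for cancellation between reads:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

type readerFunc func(p []byte) (int, error)

func (rf readerFunc) Read(p []byte) (int, error) { return rf(p) }

// Copy behaves like io.Copy but aborts with ctx.Err() once ctx is done.
func Copy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
	return io.Copy(dst, readerFunc(func(p []byte) (int, error) {
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		default:
			return src.Read(p)
		}
	}))
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	n, err := Copy(ctx, ioutil.Discard, strings.NewReader("hello"))
	fmt.Println(n, err) // 5 <nil>
}
```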
+github.com/cheggaaa/pb v1.0.27 h1:wIkZHkNfC7R6GI5w7l/PdAdzXzlrbcI3p8OAlnkTsnc= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8= +github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= +github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 h1:0oC8rFnE+74kEmuHZ46F6KHsMr5Gx2gUQPuNz28iQZM= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/cheggaaa/pb.v1 v1.0.27 
h1:kJdccidYzt3CaHD1crCFTS1hxyhSi059NhOFUf03YFo= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go index 4655226f66..4280ec59a8 100644 --- a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go @@ -11,19 +11,18 @@ func parse(rawURL string) (*url.URL, error) { // Make sure we're using "/" since URLs are "/"-based. rawURL = filepath.ToSlash(rawURL) + if len(rawURL) > 1 && rawURL[1] == ':' { + // Assume we're dealing with a drive letter. In which case we + // force the 'file' scheme to avoid "net/url" URL.String() prepending + // our url with "./". + rawURL = "file://" + rawURL + } + u, err := url.Parse(rawURL) if err != nil { return nil, err } - if len(rawURL) > 1 && rawURL[1] == ':' { - // Assume we're dealing with a drive letter file path where the drive - // letter has been parsed into the URL Scheme, and the rest of the path - // has been parsed into the URL Path without the leading ':' character. - u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path) - u.Scheme = "" - } - if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { // Assume we're dealing with a drive letter file path where the drive // letter has been parsed into the URL Host. diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go index c63f2bbaf9..dab6d400cb 100644 --- a/vendor/github.com/hashicorp/go-getter/source.go +++ b/vendor/github.com/hashicorp/go-getter/source.go @@ -6,18 +6,31 @@ import ( "strings" ) -// SourceDirSubdir takes a source and returns a tuple of the URL without -// the subdir and the URL with the subdir. +// SourceDirSubdir takes a source URL and returns a tuple of the URL without +// the subdir and the subdir. +// +// ex: +// dom.com/path/?q=p => dom.com/path/?q=p, "" +// proto://dom.com/path//*?q=p => proto://dom.com/path?q=p, "*" +// proto://dom.com/path//path2?q=p => proto://dom.com/path?q=p, "path2" +// func SourceDirSubdir(src string) (string, string) { - // Calcaulate an offset to avoid accidentally marking the scheme + + // URL might contains another url in query parameters + stop := len(src) + if idx := strings.Index(src, "?"); idx > -1 { + stop = idx + } + + // Calculate an offset to avoid accidentally marking the scheme // as the dir. var offset int - if idx := strings.Index(src, "://"); idx > -1 { + if idx := strings.Index(src[:stop], "://"); idx > -1 { offset = idx + 3 } // First see if we even have an explicit subdir - idx := strings.Index(src[offset:], "//") + idx := strings.Index(src[offset:stop], "//") if idx == -1 { return src, "" } diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore new file mode 100644 index 0000000000..42cc4105ff --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/.gitignore @@ -0,0 +1 @@ +.idea* \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index 614342b2d8..1153e28535 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -10,8 +10,7 @@ interface for use in development and production environments. 
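A sketch that exercises `SourceDirSubdir` on the documented forms; expected results follow the doc comment above, including the case where the query string must be ignored when scanning for `//`:

```go
package main

import (
	"fmt"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	for _, s := range []string{
		"dom.com/path/?q=p",               // => same source, subdir ""
		"proto://dom.com/path//*?q=p",     // => proto://dom.com/path?q=p, "*"
		"proto://dom.com/path//path2?q=p", // => proto://dom.com/path?q=p, "path2"
	} {
		src, sub := getter.SourceDirSubdir(s)
		fmt.Printf("%q => src=%q subdir=%q\n", s, src, sub)
	}
}
```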
It provides logging levels that provide decreased output based upon the desired amount of output, unlike the standard library `log` package. -It does not provide `Printf` style logging, only key/value logging that is -exposed as arguments to the logging functions for simplicity. +It provides `Printf` style logging of values via `hclog.Fmt()`. It provides a human readable output mode for use in development as well as JSON output mode for production. @@ -100,6 +99,17 @@ requestLogger.Info("we are transporting a request") This allows sub Loggers to be context specific without having to thread that into all the callers. +### Using `hclog.Fmt()` + +```go +var int totalBandwidth = 200 +appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth)) +``` + +```text +... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s" +``` + ### Use this with code that uses the standard library logger If you want to use the standard library's `log.Logger` interface you can wrap diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod new file mode 100644 index 0000000000..0d079a6544 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -0,0 +1,7 @@ +module github.com/hashicorp/go-hclog + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 +) diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum new file mode 100644 index 0000000000..e03ee77d9e --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -0,0 +1,6 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/int.go index c8609c3575..2aaa1f895d 100644 --- a/vendor/github.com/hashicorp/go-hclog/int.go +++ b/vendor/github.com/hashicorp/go-hclog/int.go @@ -2,15 +2,19 @@ package hclog import ( "bufio" + "bytes" "encoding" "encoding/json" "fmt" "log" "os" + "reflect" "runtime" + "sort" "strconv" "strings" "sync" + "sync/atomic" "time" ) @@ -18,8 +22,8 @@ var ( _levelToBracket = map[Level]string{ Debug: "[DEBUG]", Trace: "[TRACE]", - Info: "[INFO ]", - Warn: "[WARN ]", + Info: "[INFO] ", + Warn: "[WARN] ", Error: "[ERROR]", } ) @@ -40,28 +44,40 @@ func New(opts *LoggerOptions) Logger { level = DefaultLevel } - return &intLogger{ - m: new(sync.Mutex), - json: opts.JSONFormat, - caller: opts.IncludeLocation, - name: opts.Name, - w: bufio.NewWriter(output), - level: level, + mtx := opts.Mutex + if mtx == nil { + mtx = new(sync.Mutex) } + + ret := &intLogger{ + m: mtx, + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + timeFormat: TimeFormat, + w: bufio.NewWriter(output), + level: new(int32), + } + if opts.TimeFormat != "" { + ret.timeFormat = opts.TimeFormat + } + atomic.StoreInt32(ret.level, int32(level)) + return ret } // The internal logger implementation. Internal in that it is defined entirely // by this package. 
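The new `Mutex` and `TimeFormat` options introduced below can be exercised like this; all values are examples:

```go
package main

import (
	"os"
	"sync"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	var mu sync.Mutex
	logger := hclog.New(&hclog.LoggerOptions{
		Name:       "my-app",
		Level:      hclog.Debug,
		Output:     os.Stderr,
		Mutex:      &mu,            // share with any other logger writing to Stderr
		TimeFormat: "15:04:05.000", // overrides the default hclog.TimeFormat
	})
	logger.Debug("starting", "pid", os.Getpid())
}
```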
type intLogger struct { - json bool - caller bool - name string + json bool + caller bool + name string + timeFormat string // this is a pointer so that it's shared by any derived loggers, since // those derived loggers share the bufio.Writer as well. m *sync.Mutex w *bufio.Writer - level Level + level *int32 implied []interface{} } @@ -76,7 +92,7 @@ const TimeFormat = "2006-01-02T15:04:05.000Z0700" // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger. func (z *intLogger) Log(level Level, msg string, args ...interface{}) { - if level < z.level { + if level < Level(atomic.LoadInt32(z.level)) { return } @@ -127,14 +143,14 @@ func trimCallerPath(path string) string { // Non-JSON logging format function func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { - z.w.WriteString(t.Format(TimeFormat)) + z.w.WriteString(t.Format(z.timeFormat)) z.w.WriteByte(' ') s, ok := _levelToBracket[level] if ok { z.w.WriteString(s) } else { - z.w.WriteString("[UNKN ]") + z.w.WriteString("[?????]") } if z.caller { @@ -175,7 +191,10 @@ func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{ FOR: for i := 0; i < len(args); i = i + 2 { - var val string + var ( + val string + raw bool + ) switch st := args[i+1].(type) { case string: @@ -203,15 +222,23 @@ func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{ case CapturedStacktrace: stacktrace = st continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) default: - val = fmt.Sprintf("%v", st) + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = z.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } } z.w.WriteByte(' ') z.w.WriteString(args[i].(string)) z.w.WriteByte('=') - if strings.ContainsAny(val, " \t\n\r") { + if !raw && strings.ContainsAny(val, " \t\n\r") { z.w.WriteByte('"') z.w.WriteString(val) z.w.WriteByte('"') @@ -228,6 +255,45 @@ func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{ } } +func (z *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) + } + } + + buf.WriteRune(']') + + return buf.String() +} + // JSON logging function func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) { vals := map[string]interface{}{ @@ -263,6 +329,8 @@ func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interf } } + args = append(z.implied, args...) + if args != nil && len(args) > 0 { if len(args)%2 != 0 { cs, ok := args[len(args)-1].(CapturedStacktrace) @@ -281,16 +349,20 @@ func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interf continue } val := args[i+1] - // Check if val is of type error. 
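With the `renderSlice` handling above, slice-valued arguments render as a bracketed list instead of a single `%v` token; a sketch, with the output shown approximately:

```go
package main

import (
	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	l := hclog.New(&hclog.LoggerOptions{Name: "demo"})

	// Elements containing whitespace are quoted individually, and the
	// list itself is emitted raw (unquoted).
	l.Info("peers joined",
		"names", []string{"node a", "node-b"},
		"ports", []int{8301, 8302})
	// ... [INFO]  demo: peers joined: names=["node a", node-b] ports=[8301, 8302]
}
```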
If error type doesn't - // implement json.Marshaler or encoding.TextMarshaler - // then set val to err.Error() so that it gets marshaled - if err, ok := val.(error); ok { - switch err.(type) { + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { case json.Marshaler, encoding.TextMarshaler: default: - val = err.Error() + val = sv.Error() } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) } + vals[args[i].(string)] = val } } @@ -328,36 +400,66 @@ func (z *intLogger) Error(msg string, args ...interface{}) { // Indicate that the logger would emit TRACE level logs func (z *intLogger) IsTrace() bool { - return z.level == Trace + return Level(atomic.LoadInt32(z.level)) == Trace } // Indicate that the logger would emit DEBUG level logs func (z *intLogger) IsDebug() bool { - return z.level <= Debug + return Level(atomic.LoadInt32(z.level)) <= Debug } // Indicate that the logger would emit INFO level logs func (z *intLogger) IsInfo() bool { - return z.level <= Info + return Level(atomic.LoadInt32(z.level)) <= Info } // Indicate that the logger would emit WARN level logs func (z *intLogger) IsWarn() bool { - return z.level <= Warn + return Level(atomic.LoadInt32(z.level)) <= Warn } // Indicate that the logger would emit ERROR level logs func (z *intLogger) IsError() bool { - return z.level <= Error + return Level(atomic.LoadInt32(z.level)) <= Error } // Return a sub-Logger for which every emitted log message will contain // the given key/value pairs. This is used to create a context specific // Logger. func (z *intLogger) With(args ...interface{}) Logger { + if len(args)%2 != 0 { + panic("With() call requires paired arguments") + } + var nz intLogger = *z - nz.implied = append(nz.implied, args...) + result := make(map[string]interface{}, len(z.implied)+len(args)) + keys := make([]string, 0, len(z.implied)+len(args)) + + // Read existing args, store map and key for consistent sorting + for i := 0; i < len(z.implied); i += 2 { + key := z.implied[i].(string) + keys = append(keys, key) + result[key] = z.implied[i+1] + } + // Read new args, store map and key for consistent sorting + for i := 0; i < len(args); i += 2 { + key := args[i].(string) + _, exists := result[key] + if !exists { + keys = append(keys, key) + } + result[key] = args[i+1] + } + + // Sort keys to be consistent + sort.Strings(keys) + + nz.implied = make([]interface{}, 0, len(z.implied)+len(args)) + for _, k := range keys { + nz.implied = append(nz.implied, k) + nz.implied = append(nz.implied, result[k]) + } return &nz } @@ -369,6 +471,8 @@ func (z *intLogger) Named(name string) Logger { if nz.name != "" { nz.name = nz.name + "." + name + } else { + nz.name = name } return &nz @@ -385,6 +489,12 @@ func (z *intLogger) ResetNamed(name string) Logger { return &nz } +// Update the logging level on-the-fly. This will affect all subloggers as +// well. +func (z *intLogger) SetLevel(level Level) { + atomic.StoreInt32(z.level, int32(level)) +} + // Create a *log.Logger that will send it's data through this Logger. This // allows packages that expect to be using the standard library log to actually // use this logger. 
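Because the level is now a shared `*int32`, `SetLevel` on a parent is immediately visible to loggers derived via `Named` or `With`; a minimal sketch:

```go
package main

import (
	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	root := hclog.New(&hclog.LoggerOptions{Name: "app", Level: hclog.Info})
	sub := root.Named("db")

	sub.Debug("not emitted") // below the Info threshold

	root.SetLevel(hclog.Debug)
	sub.Debug("emitted now") // the sublogger shares the level pointer
}
```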
diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/log.go index 1b36f81126..d98714e0a0 100644 --- a/vendor/github.com/hashicorp/go-hclog/log.go +++ b/vendor/github.com/hashicorp/go-hclog/log.go @@ -5,6 +5,7 @@ import ( "log" "os" "strings" + "sync" ) var ( @@ -12,7 +13,7 @@ var ( DefaultLevel = Info ) -type Level int +type Level int32 const ( // This is a special level used to indicate that no level has been @@ -36,6 +37,18 @@ const ( Error Level = 5 ) +// When processing a value of this type, the logger automatically treats the first +// argument as a Printf formatting string and passes the rest as the values to be +// formatted. For example: L.Info(Fmt{"%d beans/day", beans}). This is a simple +// convience type for when formatting is required. +type Format []interface{} + +// Fmt returns a Format type. This is a convience function for creating a Format +// type. +func Fmt(str string, args ...interface{}) Format { + return append(Format{str}, args...) +} + // LevelFromString returns a Level type for the named log level, or "NoLevel" if // the level string is invalid. This facilitates setting the log level via // config or environment variable by name in a predictable way. @@ -108,6 +121,10 @@ type Logger interface { // the current name as well. ResetNamed(name string) Logger + // Updates the level. This should affect all sub-loggers as well. If an + // implementation cannot update the level on the fly, it should no-op. + SetLevel(level Level) + // Return a value that conforms to the stdlib log.Logger interface StandardLogger(opts *StandardLoggerOptions) *log.Logger } @@ -127,12 +144,18 @@ type LoggerOptions struct { // The threshold for the logger. Anything less severe is supressed Level Level - // Where to write the logs to. Defaults to os.Stdout if nil + // Where to write the logs to. Defaults to os.Stderr if nil Output io.Writer + // An optional mutex pointer in case Output is shared + Mutex *sync.Mutex + // Control if the output should be in JSON. JSONFormat bool // Include file and line information in each log line IncludeLocation bool + + // The time format to use instead of the default + TimeFormat string } diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go new file mode 100644 index 0000000000..0942361a52 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -0,0 +1,47 @@ +package hclog + +import ( + "io/ioutil" + "log" +) + +// NewNullLogger instantiates a Logger for which all calls +// will succeed without doing anything. +// Useful for testing purposes. 
+func NewNullLogger() Logger { + return &nullLogger{} +} + +type nullLogger struct{} + +func (l *nullLogger) Trace(msg string, args ...interface{}) {} + +func (l *nullLogger) Debug(msg string, args ...interface{}) {} + +func (l *nullLogger) Info(msg string, args ...interface{}) {} + +func (l *nullLogger) Warn(msg string, args ...interface{}) {} + +func (l *nullLogger) Error(msg string, args ...interface{}) {} + +func (l *nullLogger) IsTrace() bool { return false } + +func (l *nullLogger) IsDebug() bool { return false } + +func (l *nullLogger) IsInfo() bool { return false } + +func (l *nullLogger) IsWarn() bool { return false } + +func (l *nullLogger) IsError() bool { return false } + +func (l *nullLogger) With(args ...interface{}) Logger { return l } + +func (l *nullLogger) Named(name string) Logger { return l } + +func (l *nullLogger) ResetNamed(name string) Logger { return l } + +func (l *nullLogger) SetLevel(level Level) {} + +func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + return log.New(ioutil.Discard, "", log.LstdFlags) +} diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index 78d354ed23..fe305ad598 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -76,7 +76,7 @@ must be properly secured to protect this configuration. ## Architecture The HashiCorp plugin system works by launching subprocesses and communicating -over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io). A single +over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single connection is made between any plugin and the host process. For net/rpc-based plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) library to multiplex any other connections on top. For gRPC-based plugins, @@ -109,7 +109,7 @@ high-level steps that must be done. Examples are available in the 1. Choose the interface(s) you want to expose for plugins. 2. For each interface, implement an implementation of that interface - that communicates over a `net/rpc` connection or other a + that communicates over a `net/rpc` connection or over a [gRPC](http://www.grpc.io) connection or both. You'll have to implement both a client and server implementation. @@ -150,19 +150,19 @@ user experience. When we started using plugins (late 2012, early 2013), plugins over RPC were the only option since Go didn't support dynamic library loading. Today, -Go still doesn't support dynamic library loading, but they do intend to. -Since 2012, our plugin system has stabilized from millions of users using it, -and has many benefits we've come to value greatly. - -For example, we intend to use this plugin system in -[Vault](https://www.vaultproject.io), and dynamic library loading will -simply never be acceptable in Vault for security reasons. That is an extreme +Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with +a number of limitations. Since 2012, our plugin system has stabilized +from tens of millions of users using it, and has many benefits we've come to +value greatly. + +For example, we use this plugin system in +[Vault](https://www.vaultproject.io) where dynamic library loading is +not acceptable for security reasons. That is an extreme example, but we believe our library system has more upsides than downsides over dynamic library loading and since we've had it built and tested for years, -we'll likely continue to use it. 
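A test-file sketch of `NewNullLogger`; the package and test names are hypothetical:

```go
package worker_test

import (
	"testing"

	hclog "github.com/hashicorp/go-hclog"
)

func TestQuietLogging(t *testing.T) {
	logger := hclog.NewNullLogger()
	logger.Error("swallowed") // no output, no side effects

	// Every Is* predicate reports false on the null logger.
	if logger.IsError() {
		t.Fatal("expected null logger to disable all levels")
	}
}
```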
+we'll continue to use it. Shared libraries have one major advantage over our system which is much higher performance. In real world scenarios across our various tools, we've never required any more performance out of our plugin system and it has seen very high throughput, so this isn't a concern for us at the moment. - diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index b912826b20..8118b5876c 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -2,14 +2,16 @@ package plugin import ( "bufio" + "context" "crypto/subtle" "crypto/tls" + "crypto/x509" + "encoding/base64" "errors" "fmt" "hash" "io" "io/ioutil" - "log" "net" "os" "os/exec" @@ -19,7 +21,6 @@ import ( "sync" "sync/atomic" "time" - "unicode" hclog "github.com/hashicorp/go-hclog" ) @@ -70,15 +71,31 @@ var ( // // See NewClient and ClientConfig for using a Client. type Client struct { - config *ClientConfig - exited bool - doneLogging chan struct{} - l sync.Mutex - address net.Addr - process *os.Process - client ClientProtocol - protocol Protocol - logger hclog.Logger + config *ClientConfig + exited bool + l sync.Mutex + address net.Addr + process *os.Process + client ClientProtocol + protocol Protocol + logger hclog.Logger + doneCtx context.Context + ctxCancel context.CancelFunc + negotiatedVersion int + + // clientWaitGroup is used to manage the lifecycle of the plugin management + // goroutines. + clientWaitGroup sync.WaitGroup + + // processKilled is used for testing only, to flag when the process was + // forcefully killed. + processKilled bool +} + +// NegotiatedVersion returns the protocol version negotiated with the server. +// This is only valid after Start() is called. +func (c *Client) NegotiatedVersion() int { + return c.negotiatedVersion } // ClientConfig is the configuration used to initialize a new @@ -89,7 +106,13 @@ type ClientConfig struct { HandshakeConfig // Plugins are the plugins that can be consumed. - Plugins map[string]Plugin + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet // One of the following must be set, but not both. // @@ -156,6 +179,29 @@ type ClientConfig struct { // Logger is the logger that the client will used. If none is provided, // it will default to hclog's default logger. Logger hclog.Logger + + // AutoMTLS has the client and server automatically negotiate mTLS for + // transport authentication. This ensures that only the original client will + // be allowed to connect to the server, and all other connections will be + // rejected. The client will also refuse to connect to any server that isn't + // the original instance started by the client. + // + // In this mode of operation, the client generates a one-time use tls + // certificate, sends the public x.509 certificate to the new server, and + // the server generates a one-time use tls certificate, and sends the public + // x.509 certificate back to the client. These are used to authenticate all + // rpc connections between the client and server. 
+ // + // Setting AutoMTLS to true implies that the server must support the + // protocol, and correctly negotiate the tls certificates, or a connection + // failure will result. + // + // The client should not set TLSConfig, nor should the server set a + // TLSProvider, because AutoMTLS implies that a new certificate and tls + // configuration will be generated at startup. + // + // You cannot Reattach to a server with this option enabled. + AutoMTLS bool } // ReattachConfig is used to configure a client to reattach to an @@ -232,7 +278,6 @@ func CleanupClients() { } managedClientsLock.Unlock() - log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...") wg.Wait() } @@ -310,7 +355,7 @@ func (c *Client) Client() (ClientProtocol, error) { c.client, err = newRPCClient(c) case ProtocolGRPC: - c.client, err = newGRPCClient(c) + c.client, err = newGRPCClient(c.doneCtx, c) default: return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) @@ -331,6 +376,14 @@ func (c *Client) Exited() bool { return c.exited } +// killed is used in tests to check if a process failed to exit gracefully, and +// needed to be killed. +func (c *Client) killed() bool { + c.l.Lock() + defer c.l.Unlock() + return c.processKilled +} + // End the executing subprocess (if it is running) and perform any cleanup // tasks necessary such as capturing any remaining logs and so on. // @@ -342,14 +395,24 @@ func (c *Client) Kill() { c.l.Lock() process := c.process addr := c.address - doneCh := c.doneLogging c.l.Unlock() - // If there is no process, we never started anything. Nothing to kill. + // If there is no process, there is nothing to kill. if process == nil { return } + defer func() { + // Wait for the all client goroutines to finish. + c.clientWaitGroup.Wait() + + // Make sure there is no reference to the old process after it has been + // killed. + c.l.Lock() + c.process = nil + c.l.Unlock() + }() + // We need to check for address here. It is possible that the plugin // started (process != nil) but has no address (addr == nil) if the // plugin failed at startup. If we do have an address, we need to close @@ -370,6 +433,8 @@ func (c *Client) Kill() { // kill in a moment anyways. c.logger.Warn("error closing client during Kill", "err", err) } + } else { + c.logger.Error("client", "error", err) } } @@ -378,17 +443,20 @@ func (c *Client) Kill() { // doneCh which would be closed if the process exits. if graceful { select { - case <-doneCh: + case <-c.doneCtx.Done(): + c.logger.Debug("plugin exited") return - case <-time.After(250 * time.Millisecond): + case <-time.After(2 * time.Second): } } // If graceful exiting failed, just kill it + c.logger.Warn("plugin failed to exit gracefully") process.Kill() - // Wait for the client to finish logging so we have a complete log - <-doneCh + c.l.Lock() + c.processKilled = true + c.l.Unlock() } // Starts the underlying subprocess, communicating with it to negotiate @@ -407,7 +475,7 @@ func (c *Client) Start() (addr net.Addr, err error) { // If one of cmd or reattach isn't set, then it is an error. We wrap // this in a {} for scoping reasons, and hopeful that the escape - // analysis will pop the stock here. + // analysis will pop the stack here. { cmdSet := c.config.Cmd != nil attachSet := c.config.Reattach != nil @@ -421,71 +489,49 @@ func (c *Client) Start() (addr net.Addr, err error) { } } - // Create the logging channel for when we kill - c.doneLogging = make(chan struct{}) - if c.config.Reattach != nil { - // Verify the process still exists. 
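Opting in to `AutoMTLS` from the client side looks roughly like this; the handshake cookie, plugin name, and binary path are hypothetical, and `kvPlugin` is a stub standing in for a real `plugin.Plugin` implementation:

```go
package main

import (
	"net/rpc"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

// kvPlugin is a stand-in; a real plugin returns its RPC server and
// client from these methods (or implements the gRPC variant).
type kvPlugin struct{}

func (kvPlugin) Server(*plugin.MuxBroker) (interface{}, error)              { return nil, nil }
func (kvPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) { return nil, nil }

func main() {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "EXAMPLE_PLUGIN", // hypothetical cookie
			MagicCookieValue: "example",
		},
		Plugins:  plugin.PluginSet{"kv": kvPlugin{}},
		Cmd:      exec.Command("./example-plugin"), // hypothetical binary
		AutoMTLS: true,                             // one-time certs negotiated at startup
	})
	defer client.Kill()

	if _, err := client.Client(); err != nil {
		panic(err) // mTLS or handshake failures surface here
	}
}
```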
If not, then it is an error - p, err := os.FindProcess(c.config.Reattach.Pid) - if err != nil { - return nil, err - } + return c.reattach() + } - // Attempt to connect to the addr since on Unix systems FindProcess - // doesn't actually return an error if it can't find the process. - conn, err := net.Dial( - c.config.Reattach.Addr.Network(), - c.config.Reattach.Addr.String()) - if err != nil { - p.Kill() - return nil, ErrProcessNotFound - } - conn.Close() - - // Goroutine to mark exit status - go func(pid int) { - // Wait for the process to die - pidWait(pid) - - // Log so we can see it - c.logger.Debug("reattached plugin process exited") - - // Mark it - c.l.Lock() - defer c.l.Unlock() - c.exited = true - - // Close the logging channel since that doesn't work on reattach - close(c.doneLogging) - }(p.Pid) - - // Set the address and process - c.address = c.config.Reattach.Addr - c.process = p - c.protocol = c.config.Reattach.Protocol - if c.protocol == "" { - // Default the protocol to net/rpc for backwards compatibility - c.protocol = ProtocolNetRPC - } + if c.config.VersionedPlugins == nil { + c.config.VersionedPlugins = make(map[int]PluginSet) + } - return c.address, nil + // handle all plugins as versioned, using the handshake config as the default. + version := int(c.config.ProtocolVersion) + + // Make sure we're not overwriting a real version 0. If ProtocolVersion was + // non-zero, then we have to just assume the user made sure that + // VersionedPlugins doesn't conflict. + if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil { + c.config.VersionedPlugins[version] = c.config.Plugins + } + + var versionStrings []string + for v := range c.config.VersionedPlugins { + versionStrings = append(versionStrings, strconv.Itoa(v)) } env := []string{ fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), } - stdout_r, stdout_w := io.Pipe() - stderr_r, stderr_w := io.Pipe() - cmd := c.config.Cmd cmd.Env = append(cmd.Env, os.Environ()...) cmd.Env = append(cmd.Env, env...) cmd.Stdin = os.Stdin - cmd.Stderr = stderr_w - cmd.Stdout = stdout_w + + cmdStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + cmdStderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } if c.config.SecureConfig != nil { if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { @@ -495,6 +541,29 @@ func (c *Client) Start() (addr net.Addr, err error) { } } + // Setup a temporary certificate for client/server mtls, and send the public + // certificate to the plugin. 
+ if c.config.AutoMTLS { + c.logger.Info("configuring client automatic mTLS") + certPEM, keyPEM, err := generateCert() + if err != nil { + c.logger.Error("failed to generate client certificate", "error", err) + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + c.logger.Error("failed to parse client certificate", "error", err) + return nil, err + } + + cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM)) + + c.config.TLSConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ServerName: "localhost", + } + } + c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) err = cmd.Start() if err != nil { @@ -503,6 +572,7 @@ func (c *Client) Start() (addr net.Addr, err error) { // Set the process c.process = cmd.Process + c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid) // Make sure the command is properly cleaned up if there is an error defer func() { @@ -517,24 +587,37 @@ func (c *Client) Start() (addr net.Addr, err error) { } }() - // Start goroutine to wait for process to exit - exitCh := make(chan struct{}) + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) go func() { - // Make sure we close the write end of our stderr/stdout so - // that the readers send EOF properly. - defer stderr_w.Close() - defer stdout_w.Close() + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + defer c.clientWaitGroup.Done() + + // get the cmd info early, since the process information will be removed + // in Kill. + pid := c.process.Pid + path := cmd.Path // Wait for the command to end. - cmd.Wait() + err := cmd.Wait() + + debugMsgArgs := []interface{}{ + "path", path, + "pid", pid, + } + if err != nil { + debugMsgArgs = append(debugMsgArgs, + []interface{}{"error", err.Error()}...) + } // Log and make sure to flush the logs write away - c.logger.Debug("plugin process exited", "path", cmd.Path) + c.logger.Debug("plugin process exited", debugMsgArgs...) os.Stderr.Sync() - // Mark that we exited - close(exitCh) - // Set that we exited, which takes a lock c.l.Lock() defer c.l.Unlock() @@ -542,32 +625,33 @@ func (c *Client) Start() (addr net.Addr, err error) { }() // Start goroutine that logs the stderr - go c.logStderr(stderr_r) + c.clientWaitGroup.Add(1) + // logStderr calls Done() + go c.logStderr(cmdStderr) // Start a goroutine that is going to be reading the lines // out of stdout - linesCh := make(chan []byte) + linesCh := make(chan string) + c.clientWaitGroup.Add(1) go func() { + defer c.clientWaitGroup.Done() defer close(linesCh) - buf := bufio.NewReader(stdout_r) - for { - line, err := buf.ReadBytes('\n') - if line != nil { - linesCh <- line - } - - if err == io.EOF { - return - } + scanner := bufio.NewScanner(cmdStdout) + for scanner.Scan() { + linesCh <- scanner.Text() } }() // Make sure after we exit we read the lines from stdout forever - // so they don't block since it is an io.Pipe + // so they don't block since it is a pipe. + // The scanner goroutine above will close this, but track it with a wait + // group for completeness. 
+ c.clientWaitGroup.Add(1) defer func() { go func() { - for _ = range linesCh { + defer c.clientWaitGroup.Done() + for range linesCh { } }() }() @@ -580,12 +664,12 @@ func (c *Client) Start() (addr net.Addr, err error) { select { case <-timeout: err = errors.New("timeout while waiting for plugin to start") - case <-exitCh: + case <-c.doneCtx.Done(): err = errors.New("plugin exited before we could connect") - case lineBytes := <-linesCh: + case line := <-linesCh: // Trim the line and split by "|" in order to get the parts of // the output. - line := strings.TrimSpace(string(lineBytes)) + line = strings.TrimSpace(line) parts := strings.SplitN(line, "|", 6) if len(parts) < 4 { err = fmt.Errorf( @@ -606,27 +690,25 @@ func (c *Client) Start() (addr net.Addr, err error) { if int(coreProtocol) != CoreProtocolVersion { err = fmt.Errorf("Incompatible core API version with plugin. "+ - "Plugin version: %s, Ours: %d\n\n"+ + "Plugin version: %s, Core version: %d\n\n"+ "To fix this, the plugin usually only needs to be recompiled.\n"+ "Please report this to the plugin author.", parts[0], CoreProtocolVersion) return } } - // Parse the protocol version - var protocol int64 - protocol, err = strconv.ParseInt(parts[1], 10, 0) + // Test the API version + version, pluginSet, err := c.checkProtoVersion(parts[1]) if err != nil { - err = fmt.Errorf("Error parsing protocol version: %s", err) - return + return addr, err } - // Test the API version - if uint(protocol) != c.config.ProtocolVersion { - err = fmt.Errorf("Incompatible API version with plugin. "+ - "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion) - return - } + // set the Plugins value to the compatible set, so the version + // doesn't need to be passed through to the ClientProtocol + // implementation. + c.config.Plugins = pluginSet + c.negotiatedVersion = version + c.logger.Debug("using plugin", "version", version) switch parts[2] { case "tcp": @@ -654,15 +736,125 @@ func (c *Client) Start() (addr net.Addr, err error) { if !found { err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", c.protocol, c.config.AllowedProtocols) - return + return addr, err } + // See if we have a TLS certificate from the server. + // Checking if the length is > 50 rules out catching the unused "extra" + // data returned from some older implementations. + if len(parts) >= 6 && len(parts[5]) > 50 { + err := c.loadServerCert(parts[5]) + if err != nil { + return nil, fmt.Errorf("error parsing server cert: %s", err) + } + } } c.address = addr return } +// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the +// server, and load it as the RootCA for the client TLSConfig. +func (c *Client) loadServerCert(cert string) error { + certPool := x509.NewCertPool() + + asn1, err := base64.RawStdEncoding.DecodeString(cert) + if err != nil { + return err + } + + x509Cert, err := x509.ParseCertificate([]byte(asn1)) + if err != nil { + return err + } + + certPool.AddCert(x509Cert) + + c.config.TLSConfig.RootCAs = certPool + return nil +} + +func (c *Client) reattach() (net.Addr, error) { + // Verify the process still exists. If not, then it is an error + p, err := os.FindProcess(c.config.Reattach.Pid) + if err != nil { + return nil, err + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. 
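For orientation, the handshake line being parsed here has the shape `core-version|app-version|network|address|protocol|tls-cert`, per the `SplitN` above; the sample values below are illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// The sixth field (server certificate) may be empty.
	line := "1|3|unix|/tmp/plugin-socket|grpc|"

	parts := strings.SplitN(line, "|", 6)
	fmt.Println("core:", parts[0])     // must equal CoreProtocolVersion
	fmt.Println("version:", parts[1])  // matched against VersionedPlugins
	fmt.Println("network:", parts[2])  // "tcp" or "unix"
	fmt.Println("addr:", parts[3])
	fmt.Println("protocol:", parts[4]) // "netrpc" or "grpc"
}
```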
+ conn, err := net.Dial( + c.config.Reattach.Addr.Network(), + c.config.Reattach.Addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) + // Goroutine to mark exit status + go func(pid int) { + defer c.clientWaitGroup.Done() + + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + // Wait for the process to die + pidWait(pid) + + // Log so we can see it + c.logger.Debug("reattached plugin process exited") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }(p.Pid) + + // Set the address and process + c.address = c.config.Reattach.Addr + c.process = p + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } + + return c.address, nil +} + +// checkProtoVersion returns the negotiated version and PluginSet. +// This returns an error if the server returned an incompatible protocol +// version, or an invalid handshake response. +func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) { + serverVersion, err := strconv.Atoi(protoVersion) + if err != nil { + return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err) + } + + // record these for the error message + var clientVersions []int + + // all versions, including the legacy ProtocolVersion have been added to + // the versions set + for version, plugins := range c.config.VersionedPlugins { + clientVersions = append(clientVersions, version) + + if serverVersion != version { + continue + } + return version, plugins, nil + } + + return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+ + "Plugin version: %d, Client versions: %d", serverVersion, clientVersions) +} + // ReattachConfig returns the information that must be provided to NewClient // to reattach to the plugin process that this client started. This is // useful for plugins that detach from their parent process. @@ -707,18 +899,29 @@ func (c *Client) Protocol() Protocol { return c.protocol } +func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { + return func(_ string, _ time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + return conn, nil + } +} + // dialer is compatible with grpc.WithDialer and creates the connection // to the plugin. func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { - // Connect to the client - conn, err := net.Dial(c.address.Network(), c.address.String()) + conn, err := netAddrDialer(c.address)("", timeout) if err != nil { return nil, err } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) - } // If we have a TLS config we wrap our connection. We only do this // for net/rpc since gRPC uses its own mechanism for TLS. 
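`VersionedPlugins` together with `NegotiatedVersion` can be used like this; the version numbers, plugin name, and binary path are hypothetical, and `kvPlugin` is again a stub implementation:

```go
package main

import (
	"fmt"
	"net/rpc"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

type kvPlugin struct{}

func (kvPlugin) Server(*plugin.MuxBroker) (interface{}, error)              { return nil, nil }
func (kvPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) { return nil, nil }

func main() {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			MagicCookieKey:   "EXAMPLE_PLUGIN",
			MagicCookieValue: "example",
		},
		// Offer two protocol versions; checkProtoVersion above matches
		// whichever single version the server advertises. When this is
		// set, Handshake.ProtocolVersion is not required.
		VersionedPlugins: map[int]plugin.PluginSet{
			2: {"kv": kvPlugin{}},
			3: {"kv": kvPlugin{}},
		},
		Cmd: exec.Command("./example-plugin"), // hypothetical binary
	})
	defer client.Kill()

	if _, err := client.Client(); err == nil {
		fmt.Println("negotiated:", client.NegotiatedVersion())
	}
}
```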
@@ -729,44 +932,84 @@ func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { return conn, nil } +var stdErrBufferSize = 64 * 1024 + func (c *Client) logStderr(r io.Reader) { - bufR := bufio.NewReader(r) + defer c.clientWaitGroup.Done() + l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + + reader := bufio.NewReaderSize(r, stdErrBufferSize) + // continuation indicates the previous line was a prefix + continuation := false + for { - line, err := bufR.ReadString('\n') - if line != "" { - c.config.Stderr.Write([]byte(line)) - line = strings.TrimRightFunc(line, unicode.IsSpace) + line, isPrefix, err := reader.ReadLine() + switch { + case err == io.EOF: + return + case err != nil: + l.Error("reading plugin stderr", "error", err) + return + } - l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + c.config.Stderr.Write(line) - entry, err := parseJSON(line) - // If output is not JSON format, print directly to Debug - if err != nil { - l.Debug(line) - } else { - out := flattenKVPairs(entry.KVPairs) - - l = l.With("timestamp", entry.Timestamp.Format(hclog.TimeFormat)) - switch hclog.LevelFromString(entry.Level) { - case hclog.Trace: - l.Trace(entry.Message, out...) - case hclog.Debug: - l.Debug(entry.Message, out...) - case hclog.Info: - l.Info(entry.Message, out...) - case hclog.Warn: - l.Warn(entry.Message, out...) - case hclog.Error: - l.Error(entry.Message, out...) - } + // The line was longer than our max token size, so it's likely + // incomplete and won't unmarshal. + if isPrefix || continuation { + l.Debug(string(line)) + + // if we're finishing a continued line, add the newline back in + if !isPrefix { + c.config.Stderr.Write([]byte{'\n'}) } + + continuation = isPrefix + continue } - if err == io.EOF { - break + c.config.Stderr.Write([]byte{'\n'}) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + // Attempt to infer the desired log level from the commonly used + // string prefixes + switch line := string(line); { + case strings.HasPrefix("[TRACE]", line): + l.Trace(line) + case strings.HasPrefix("[DEBUG]", line): + l.Debug(line) + case strings.HasPrefix("[INFO]", line): + l.Info(line) + case strings.HasPrefix("[WARN]", line): + l.Warn(line) + case strings.HasPrefix("[ERROR]", line): + l.Error(line) + default: + l.Debug(line) + } + } else { + out := flattenKVPairs(entry.KVPairs) + + out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + default: + // if there was no log level, it's likely this is unexpected + // json from something other than hclog, and we should output + // it verbatim. 
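One caution on the level-inference switch above: Go's signature is `strings.HasPrefix(s, prefix string) bool`, so as written each case asks whether the whole log line is a prefix of the bracket tag, which will essentially never match; real lines fall through to the `Debug` default. A corrected sketch of the intended inference:

```go
package main

import (
	"fmt"
	"strings"

	hclog "github.com/hashicorp/go-hclog"
)

// inferLevel maps a conventional "[LEVEL]" prefix to an hclog.Level,
// defaulting to Debug as the code above intends.
func inferLevel(line string) hclog.Level {
	switch {
	case strings.HasPrefix(line, "[TRACE]"):
		return hclog.Trace
	case strings.HasPrefix(line, "[DEBUG]"):
		return hclog.Debug
	case strings.HasPrefix(line, "[INFO]"):
		return hclog.Info
	case strings.HasPrefix(line, "[WARN]"):
		return hclog.Warn
	case strings.HasPrefix(line, "[ERROR]"):
		return hclog.Error
	default:
		return hclog.Debug
	}
}

func main() {
	fmt.Println(inferLevel("[WARN] disk almost full")) // 4, i.e. hclog.Warn
}
```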
+ l.Debug(string(line)) + } } } - - // Flag that we've completed logging for others - close(c.doneLogging) } diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod new file mode 100644 index 0000000000..f3ddf44e4c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -0,0 +1,17 @@ +module github.com/hashicorp/go-plugin + +require ( + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/protobuf v1.2.0 + github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb + github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 + github.com/oklog/run v1.0.0 + github.com/stretchr/testify v1.3.0 // indirect + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect + golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc // indirect + golang.org/x/text v0.3.0 // indirect + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect + google.golang.org/grpc v1.14.0 +) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum new file mode 100644 index 0000000000..21b14e998d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -0,0 +1,31 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 
h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go new file mode 100644 index 0000000000..daf142d170 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -0,0 +1,457 @@ +package plugin + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + + "github.com/oklog/run" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// streamer interface is used in the broker to send/receive connection +// information. +type streamer interface { + Send(*plugin.ConnInfo) error + Recv() (*plugin.ConnInfo, error) + Close() +} + +// sendErr is used to pass errors back during a send. +type sendErr struct { + i *plugin.ConnInfo + ch chan error +} + +// gRPCBrokerServer is used by the plugin to start a stream and to send +// connection information to/from the client. Implements GRPCBrokerServer and +// streamer interfaces. +type gRPCBrokerServer struct { + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerServer() *gRPCBrokerServer { + return &gRPCBrokerServer{ + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerServer interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the client. +func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error { + doneCh := stream.Context().Done() + defer s.Close() + + // Process send stream + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + // Process receive stream + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the client.
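+// (Editor's note) Send hands the ConnInfo to the send loop inside
+// StartStream and blocks until that loop reports stream.Send's result back
+// on the per-message error channel, so callers observe transport errors
+// synchronously.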
+func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the client from the stream to the broker. +func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerServer) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// gRPCBrokerClientImpl is used by the client to start a stream and to send +// connection information to/from the plugin. Implements GRPCBrokerClient and +// streamer interfaces. +type gRPCBrokerClientImpl struct { + // client is the underlying GRPC client used to make calls to the server. + client plugin.GRPCBrokerClient + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { + return &gRPCBrokerClientImpl{ + client: plugin.NewGRPCBrokerClient(conn), + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerClient interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the plugin. +func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process.
+// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type GRPCBroker struct { + nextId uint32 + streamer streamer + streams map[uint32]*gRPCBrokerPending + tls *tls.Config + doneCh chan struct{} + o sync.Once + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *plugin.ConnInfo + doneCh chan struct{} +} + +func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + streams: make(map[uint32]*gRPCBrokerPending), + tls: tls, + doneCh: make(chan struct{}), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + listener, err := serverListener() + if err != nil { + return nil, err + } + + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Network: listener.Addr().Network(), + Address: listener.Addr().String(), + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. +// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. +func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { + listener, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer listener.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := s(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(listener) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +// Dial opens a connection by ID. 
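+// (Editor's note, not upstream documentation) Dial blocks for up to five
+// seconds waiting for the remote end to announce a listener for this ID on
+// the broker stream. A rough pairing, with names purely illustrative: one
+// side reserves and serves an ID,
+//
+//	id := broker.NextId()
+//	go broker.AcceptAndServe(id, serveFunc)
+//
+// then passes id over an existing RPC so the other side can call
+// broker.Dial(id) and attach its service client to the returned connection.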
+func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + var c *plugin.ConnInfo + + // Open the stream + p := b.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + var addr net.Addr + switch c.Network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", c.Address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", c.Address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Network) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large number of calls. In practice +// we've never seen it happen. +func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *GRPCBroker) Run() { + for { + stream, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + p := m.getStream(stream.ServiceId) + select { + case p.ch <- stream: + default: + } + + go m.timeoutWait(stream.ServiceId, p) + } +} + +func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go index 3bcf95efc5..294518ed95 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -1,36 +1,35 @@ package plugin import ( + "crypto/tls" "fmt" + "net" + "time" + "github.com/hashicorp/go-plugin/internal/plugin" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/health/grpc_health_v1" ) -// newGRPCClient creates a new GRPCClient. The Client argument is expected -// to be successfully started already with a lock held. -func newGRPCClient(c *Client) (*GRPCClient, error) { +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { // Build dialing options. opts := make([]grpc.DialOption, 0, 5) - // We use a custom dialer so that we can connect over unix domain sockets - opts = append(opts, grpc.WithDialer(c.dialer)) - - // go-plugin expects to block the connection - opts = append(opts, grpc.WithBlock()) + // We use a custom dialer so that we can connect over unix domain sockets.
+ opts = append(opts, grpc.WithDialer(dialer)) // Fail right away opts = append(opts, grpc.FailOnNonTempDialError(true)) // If we have no TLS configuration set, we need to explicitly tell grpc // that we're connecting with an insecure connection. - if c.config.TLSConfig == nil { + if tls == nil { opts = append(opts, grpc.WithInsecure()) } else { opts = append(opts, grpc.WithTransportCredentials( - credentials.NewTLS(c.config.TLSConfig))) + credentials.NewTLS(tls))) } // Connect. Note the first parameter is unused because we use a custom @@ -40,20 +39,49 @@ func newGRPCClient(c *Client) (*GRPCClient, error) { return nil, err } - return &GRPCClient{ - Conn: conn, - Plugins: c.config.Plugins, - }, nil + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + if err != nil { + return nil, err + } + + // Start the broker. + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + go broker.Run() + go brokerGRPCClient.StartStream() + + cl := &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + controller: plugin.NewGRPCControllerClient(conn), + } + + return cl, nil } // GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. type GRPCClient struct { Conn *grpc.ClientConn Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker + + controller plugin.GRPCControllerClient } // ClientProtocol impl. func (c *GRPCClient) Close() error { + c.broker.Close() + c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) return c.Conn.Close() } @@ -69,7 +97,7 @@ func (c *GRPCClient) Dispense(name string) (interface{}, error) { return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) } - return p.GRPCClient(c.Conn) + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) } // ClientProtocol impl. diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go new file mode 100644 index 0000000000..1a8a8e70ea --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -0,0 +1,23 @@ +package plugin + +import ( + "context" + + "github.com/hashicorp/go-plugin/internal/plugin" +) + +// grpcControllerServer handles shutdown calls to terminate the server when the +// plugin client is closed. +type grpcControllerServer struct { + server *GRPCServer +} + +// Shutdown stops the grpc server. Ideally it would attempt a graceful stop +// before a full stop on the server, but see the TODO below. +func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) { + resp := &plugin.Empty{} + + // TODO: figure out why GracefulStop doesn't work.
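+ // (Editor's note, an assumption rather than a confirmed cause: GracefulStop
+ // waits for in-flight RPCs to finish, and this Shutdown RPC is itself still
+ // in flight, so a graceful stop here would block on its own caller.)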
+ s.server.Stop() + return resp, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go index 177a0cdd7d..d3dbf1cedc 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -8,6 +8,8 @@ import ( "io" "net" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/health" @@ -51,6 +53,9 @@ type GRPCServer struct { config GRPCServerConfig server *grpc.Server + broker *GRPCBroker + + logger hclog.Logger } // ServerProtocol impl. @@ -68,21 +73,43 @@ func (s *GRPCServer) Init() error { GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + // Register the broker service + brokerServer := newGRPCBrokerServer() + plugin.RegisterGRPCBrokerServer(s.server, brokerServer) + s.broker = newGRPCBroker(brokerServer, s.TLS) + go s.broker.Run() + + // Register the controller + controllerServer := &grpcControllerServer{ + server: s, + } + plugin.RegisterGRPCControllerServer(s.server, controllerServer) + // Register all our plugins onto the gRPC server. for k, raw := range s.Plugins { p, ok := raw.(GRPCPlugin) if !ok { - return fmt.Errorf("%q is not a GRPC-compatibile plugin", k) + return fmt.Errorf("%q is not a GRPC-compatible plugin", k) } - if err := p.GRPCServer(s.server); err != nil { - return fmt.Errorf("error registring %q: %s", k, err) + if err := p.GRPCServer(s.broker, s.server); err != nil { + return fmt.Errorf("error registering %q: %s", k, err) } } return nil } +// Stop calls Stop on the underlying grpc.Server +func (s *GRPCServer) Stop() { + s.server.Stop() +} + +// GracefulStop calls GracefulStop on the underlying grpc.Server +func (s *GRPCServer) GracefulStop() { + s.server.GracefulStop() +} + // Config is the GRPCServerConfig encoded as JSON then base64. func (s *GRPCServer) Config() string { // Create a buffer that will contain our final contents @@ -100,11 +127,11 @@ func (s *GRPCServer) Config() string { } func (s *GRPCServer) Serve(lis net.Listener) { - // Start serving in a goroutine - go s.server.Serve(lis) - - // Wait until graceful completion - <-s.DoneCh + defer close(s.DoneCh) + err := s.server.Serve(lis) + if err != nil { + s.logger.Error("grpc server", "error", err) + } } // GRPCServerConfig is the extra configuration passed along for consumers diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go new file mode 100644 index 0000000000..aa2fdc8138 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:. + +package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go new file mode 100644 index 0000000000..b6850aa59e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc_broker.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ConnInfo struct { + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnInfo) Reset() { *m = ConnInfo{} } +func (m *ConnInfo) String() string { return proto.CompactTextString(m) } +func (*ConnInfo) ProtoMessage() {} +func (*ConnInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_802e9beed3ec3b28, []int{0} +} + +func (m *ConnInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnInfo.Unmarshal(m, b) +} +func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) +} +func (m *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(m, src) +} +func (m *ConnInfo) XXX_Size() int { + return xxx_messageInfo_ConnInfo.Size(m) +} +func (m *ConnInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConnInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnInfo proto.InternalMessageInfo + +func (m *ConnInfo) GetServiceId() uint32 { + if m != nil { + return m.ServiceId + } + return 0 +} + +func (m *ConnInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ConnInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +} + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) } + +var fileDescriptor_802e9beed3ec3b28 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 
0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCBrokerClient is the client API for GRPCBroker service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. +type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { + s.RegisterService(&_GRPCBroker_serviceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_broker.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto new file mode 100644 index 0000000000..3fa79e8ac0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message ConnInfo { + uint32 service_id = 1; + string network = 2; + string address = 3; +} + +service GRPCBroker { + rpc StartStream(stream ConnInfo) returns (stream ConnInfo); +} + + diff --git 
a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go new file mode 100644 index 0000000000..38b4204326 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_controller.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_23c2c7e42feab570, []int{0} +} + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "plugin.Empty") +} + +func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) } + +var fileDescriptor_23c2c7e42feab570 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. +type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { + s.RegisterService(&_GRPCController_serviceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/plugin.GRPCController/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _GRPCController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_controller.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto new file mode 100644 index 0000000000..345d0a1c1f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message Empty { +} + +// The GRPCController is responsible for telling the plugin server to shutdown. 
+service GRPCController { + rpc Shutdown(Empty) returns (Empty); +} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go index 2996c14c3c..fb2ef930ca 100644 --- a/vendor/github.com/hashicorp/go-plugin/log_entry.go +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -32,11 +32,11 @@ func flattenKVPairs(kvs []*logEntryKV) []interface{} { } // parseJSON handles parsing JSON output -func parseJSON(input string) (*logEntry, error) { +func parseJSON(input []byte) (*logEntry, error) { var raw map[string]interface{} entry := &logEntry{} - err := json.Unmarshal([]byte(input), &raw) + err := json.Unmarshal(input, &raw) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go new file mode 100644 index 0000000000..8895524587 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mtls.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" +) + +// generateCert generates a temporary certificate for plugin authentication. The +// certificate and private key are returned in PEM format. +func generateCert() (cert []byte, privateKey []byte, err error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + sn, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, err + } + + host := "localhost" + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + Organization: []string{"HashiCorp"}, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, err + } + + var certOut bytes.Buffer + if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { + return nil, nil, err + } + + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, err + } + + var keyOut bytes.Buffer + if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return nil, nil, err + } + + cert = certOut.Bytes() + privateKey = keyOut.Bytes() + + return cert, privateKey, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go index 6b7bdd1cfd..79d9674633 100644 --- a/vendor/github.com/hashicorp/go-plugin/plugin.go +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -9,6 +9,7 @@ package plugin import ( + "context" "errors" "net/rpc" @@ -33,11 +34,12 @@ type GRPCPlugin interface { // GRPCServer should register this plugin for serving with the // given GRPCServer. Unlike Plugin.Server, this is only called once // since gRPC plugins serve singletons. - GRPCServer(*grpc.Server) error + GRPCServer(*GRPCBroker, *grpc.Server) error // GRPCClient should return the interface implementation for the plugin - // you're serving via gRPC.
- GRPCClient(*grpc.ClientConn) (interface{}, error) + // you're serving via gRPC. The provided context will be canceled by + // go-plugin in the event of the plugin process exiting. + GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) } // NetRPCUnsupportedPlugin implements Plugin but returns errors for the diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index e1543214a5..fc9f05a9fb 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -2,6 +2,7 @@ package plugin import ( "crypto/tls" + "crypto/x509" "encoding/base64" "errors" "fmt" @@ -11,7 +12,9 @@ import ( "os" "os/signal" "runtime" + "sort" "strconv" + "strings" "sync/atomic" "github.com/hashicorp/go-hclog" @@ -36,6 +39,8 @@ type HandshakeConfig struct { // ProtocolVersion is the version that clients must match on to // agree they can communicate. This should match the ProtocolVersion // set on ClientConfig when using a plugin. + // This field is not required if VersionedPlugins are being used in the + // Client or Server configurations. ProtocolVersion uint // MagicCookieKey and value are used as a very basic verification @@ -46,6 +51,10 @@ type HandshakeConfig struct { MagicCookieValue string } +// PluginSet is a set of plugins provided to be registered in the plugin +// server. +type PluginSet map[string]Plugin + // ServeConfig configures what sorts of plugins are served. type ServeConfig struct { // HandshakeConfig is the configuration that must match clients. @@ -55,7 +64,13 @@ type ServeConfig struct { TLSProvider func() (*tls.Config, error) // Plugins are the plugins that are served. - Plugins map[string]Plugin + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet // GRPCServer should be non-nil to enable serving the plugins over // gRPC. This is a function to create the server when needed with the @@ -66,16 +81,89 @@ type ServeConfig struct { // the gRPC health checking service. This is not optional since go-plugin // relies on this to implement Ping(). GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger } -// Protocol returns the protocol that this server should speak. -func (c *ServeConfig) Protocol() Protocol { - result := ProtocolNetRPC - if c.GRPCServer != nil { - result = ProtocolGRPC +// protocolVersion determines the protocol version and plugin set to be used by +// the server. In the event that there is no suitable version, the last version +// in the config is returned leaving the client to report the incompatibility. 
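+// (Editor's illustration) For example: with Handshake.ProtocolVersion = 1,
+// VersionedPlugins supplying versions 2 and 3, and a client that set
+// PLUGIN_PROTOCOL_VERSIONS=3,2, the loop below walks {3, 2, 1} in descending
+// order and returns 3, the newest version both sides share.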
+func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { + protoVersion := int(opts.ProtocolVersion) + pluginSet := opts.Plugins + protoType := ProtocolNetRPC + // Check if the client sent a list of acceptable versions + var clientVersions []int + if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { + for _, s := range strings.Split(vs, ",") { + v, err := strconv.Atoi(s) + if err != nil { + fmt.Fprintf(os.Stderr, "client sent invalid plugin version %q", s) + continue + } + clientVersions = append(clientVersions, v) + } + } + + // We want to iterate in reverse order, to ensure we match the newest + // compatible plugin version. + sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) + + // set the old un-versioned fields as if they were versioned plugins + if opts.VersionedPlugins == nil { + opts.VersionedPlugins = make(map[int]PluginSet) + } + + if pluginSet != nil { + opts.VersionedPlugins[protoVersion] = pluginSet } - return result + // Sort the versions to make sure we match the latest first + var versions []int + for v := range opts.VersionedPlugins { + versions = append(versions, v) + } + + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // See if we have multiple versions of Plugins to choose from + for _, version := range versions { + // Record each version, since we guarantee that this returns valid + // values even if they are not a protocol match. + protoVersion = version + pluginSet = opts.VersionedPlugins[version] + + // If we have a configured gRPC server we should select a protocol + if opts.GRPCServer != nil { + // All plugins in a set must use the same transport, so check the first + // for the protocol type + for _, p := range pluginSet { + switch p.(type) { + case GRPCPlugin: + protoType = ProtocolGRPC + default: + protoType = ProtocolNetRPC + } + break + } + } + + for _, clientVersion := range clientVersions { + if clientVersion == protoVersion { + return protoVersion, protoType, pluginSet + } + } + } + + // Return the lowest version as the fallback. + // Since we iterated over all the versions in reverse order above, these + // values are from the lowest version number plugins (which may be from + // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins + // fields). This allows serving the oldest version of our plugins to a + // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. + return protoVersion, protoType, pluginSet } // Serve serves the plugins given by ServeConfig. @@ -103,15 +191,22 @@ func Serve(opts *ServeConfig) { os.Exit(1) } + // negotiate the version and plugins + // start with default version in the handshake config + protoVersion, protoType, pluginSet := protocolVersion(opts) + // Logging goes to the original stderr log.SetOutput(os.Stderr) - // internal logger to os.Stderr - logger := hclog.New(&hclog.LoggerOptions{ - Level: hclog.Trace, - Output: os.Stderr, - JSONFormat: true, - }) + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } // Create our new stdout, stderr files. These will override our built-in // stdout/stderr so that it works across the stream boundary. @@ -148,12 +243,47 @@ func Serve(opts *ServeConfig) { } } + var serverCert string + clientCert := os.Getenv("PLUGIN_CLIENT_CERT") + // If the client is configured using AutoMTLS, the certificate will be here, + // and we need to generate our own in response.
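+ // (Editor's note) The exchange as implemented in this patch: the host puts
+ // its certificate in the PLUGIN_CLIENT_CERT environment variable; the server
+ // generates an ephemeral certificate via generateCert (mtls.go) and returns
+ // its raw leaf in the last field of the handshake line printed further down.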
+ if tlsConfig == nil && clientCert != "" { + logger.Info("configuring server automatic mTLS") + clientCertPool := x509.NewCertPool() + if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { + logger.Error("client cert provided but failed to parse", "cert", clientCert) + } + + certPEM, keyPEM, err := generateCert() + if err != nil { + logger.Error("failed to generate server certificate", "error", err) + panic(err) + } + + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + logger.Error("failed to parse server certificate", "error", err) + panic(err) + } + + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCertPool, + MinVersion: tls.VersionTLS12, + } + + // We send back the raw leaf cert data for the client rather than the + // PEM, since the protocol can't handle newlines. + serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) + } + // Create the channel to tell us when we're done doneCh := make(chan struct{}) // Build the server type var server ServerProtocol - switch opts.Protocol() { + switch protoType { case ProtocolNetRPC: // If we have a TLS configuration then we wrap the listener // ourselves and do it at that level. @@ -163,7 +293,7 @@ func Serve(opts *ServeConfig) { // Create the RPC server to dispense server = &RPCServer{ - Plugins: opts.Plugins, + Plugins: pluginSet, Stdout: stdout_r, Stderr: stderr_r, DoneCh: doneCh, @@ -172,16 +302,17 @@ case ProtocolGRPC: // Create the gRPC server server = &GRPCServer{ - Plugins: opts.Plugins, + Plugins: pluginSet, Server: opts.GRPCServer, TLS: tlsConfig, Stdout: stdout_r, Stderr: stderr_r, DoneCh: doneCh, + logger: logger, } default: - panic("unknown server protocol: " + opts.Protocol()) + panic("unknown server protocol: " + protoType) } // Initialize the servers @@ -190,25 +321,16 @@ return } - // Build the extra configuration - extra := "" - if v := server.Config(); v != "" { - extra = base64.StdEncoding.EncodeToString([]byte(v)) - } - if extra != "" { - extra = "|" + extra - } - logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) - // Output the address and service name to stdout so that core can bring it up. + // Output the address and service name to stdout so that the client can bring it up.
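+ // A sample handshake line (editor's illustration, values hypothetical):
+ //   1|3|unix|/tmp/plugin-socket|grpc|<base64 server cert; empty without AutoMTLS>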
+ fmt.Printf("%d|%d|%s|%s|%s|%s\n", CoreProtocolVersion, - opts.ProtocolVersion, + protoVersion, listener.Addr().Network(), listener.Addr().String(), - opts.Protocol(), - extra) + protoType, + serverCert) os.Stdout.Sync() // Eat the interrupts diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go index c6bf7c4ed3..2cf2c26cc5 100644 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -2,13 +2,29 @@ package plugin import ( "bytes" + "context" + "io" "net" "net/rpc" "github.com/mitchellh/go-testing-interface" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" "google.golang.org/grpc" ) + +// TestOptions allows specifying options that can affect the behavior of the +// test functions. +type TestOptions struct { + // ServerStdout causes the given value to be used in place of a blank buffer + // for RPCServer's Stdout + ServerStdout io.ReadCloser + + // ServerStderr causes the given value to be used in place of a blank buffer + // for RPCServer's Stderr + ServerStderr io.ReadCloser +} + // The testing file contains test helpers that you can use outside of // this package for making it easier to test plugins themselves. @@ -60,12 +76,20 @@ func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { // TestPluginRPCConn returns a plugin RPC client and server that are connected // together and configured. -func TestPluginRPCConn(t testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { +func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { // Create two net.Conns we can use to shuttle our control connection clientConn, serverConn := TestConn(t) // Start up the server server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + if opts != nil { + if opts.ServerStdout != nil { + server.Stdout = opts.ServerStdout + } + if opts.ServerStderr != nil { + server.Stderr = opts.ServerStderr + } + } go server.ServeConn(serverConn) // Connect the client to the server @@ -77,6 +101,35 @@ func TestPluginRPCConn(t testing.T, ps map[string]Plugin) (*RPCClient, *RPCServe return client, server } +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. +func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + // TestPluginGRPCConn returns a plugin gRPC client and server that are connected // together and configured. This is used to test gRPC connections.
func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { @@ -89,9 +142,11 @@ func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCSe // Start up the server server := &GRPCServer{ Plugins: ps, + DoneCh: make(chan struct{}), Server: DefaultGRPCServer, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer), + logger: hclog.Default(), } if err := server.Init(); err != nil { t.Fatalf("err: %s", err) @@ -107,13 +162,18 @@ func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCSe t.Fatalf("err: %s", err) } - // Connection successful, close the listener - l.Close() + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, nil) + go broker.Run() + go brokerGRPCClient.StartStream() // Create the client client := &GRPCClient{ - Conn: conn, - Plugins: ps, + Conn: conn, + Plugins: ps, + broker: broker, + doneCtx: context.Background(), + controller: plugin.NewGRPCControllerClient(conn), } return client, server diff --git a/vendor/github.com/hashicorp/go-safetemp/LICENSE b/vendor/github.com/hashicorp/go-safetemp/LICENSE new file mode 100644 index 0000000000..be2cc4dfb6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation
+
+   Any litigation relating to this License may be brought only in the courts
+   of a jurisdiction where the defendant maintains its principal place of
+   business and such litigation shall be governed by laws of that
+   jurisdiction, without reference to its conflict-of-law provisions. Nothing
+   in this Section shall prevent a party's ability to bring cross-claims or
+   counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject
+   matter hereof. If any provision of this License is held to be
+   unenforceable, such provision shall be reformed only to the extent
+   necessary to make it enforceable. Any law or regulation which provides that
+   the language of a contract shall be construed against the drafter shall not
+   be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version
+      of the License under which You originally received the Covered Software,
+      or under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a
+      modified version of this License if you rename the license and remove
+      any references to the name of the license steward (except to note that
+      such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-safetemp/README.md b/vendor/github.com/hashicorp/go-safetemp/README.md
new file mode 100644
index 0000000000..02ece33171
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-safetemp/README.md
@@ -0,0 +1,10 @@
+# go-safetemp
+[![Godoc](https://godoc.org/github.com/hashicorp/go-safetemp?status.svg)](https://godoc.org/github.com/hashicorp/go-safetemp)
+
+Functions for safely working with temporary directories and files.
+
+## Why?
+
+The Go standard library provides the excellent `ioutil` package for
+working with temporary directories and files. This library builds on
+top of that package to provide safe abstractions.
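To make the README's pitch concrete, here is a minimal usage sketch built only on the `Dir` signature vendored later in this patch, `Dir(dir, prefix string) (string, io.Closer, error)`; the `"example"` prefix and the printed path are illustrative assumptions, not part of the library:

```go
package main

import (
	"fmt"
	"log"

	safetemp "github.com/hashicorp/go-safetemp"
)

func main() {
	// Reserve a not-yet-created directory; an empty dir argument
	// defers to the OS default temporary location.
	path, closer, err := safetemp.Dir("", "example")
	if err != nil {
		log.Fatal(err)
	}
	// Closing removes the parent temporary directory and everything
	// created beneath it.
	defer closer.Close()

	// path does not exist yet, so it is safe to hand to APIs that
	// insist on creating their own target directory.
	fmt.Println(path) // e.g. /tmp/example123456789/temp (illustrative)
}
```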
diff --git a/vendor/github.com/hashicorp/go-safetemp/go.mod b/vendor/github.com/hashicorp/go-safetemp/go.mod new file mode 100644 index 0000000000..02bc5f5bb5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-safetemp diff --git a/vendor/github.com/hashicorp/go-safetemp/safetemp.go b/vendor/github.com/hashicorp/go-safetemp/safetemp.go new file mode 100644 index 0000000000..c4ae72b789 --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/safetemp.go @@ -0,0 +1,40 @@ +package safetemp + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// Dir creates a new temporary directory that isn't yet created. This +// can be used with calls that expect a non-existent directory. +// +// The directory is created as a child of a temporary directory created +// within the directory dir starting with prefix. The temporary directory +// returned is always named "temp". The parent directory has the specified +// prefix. +// +// The returned io.Closer should be used to clean up the returned directory. +// This will properly remove the returned directory and any other temporary +// files created. +// +// If an error is returned, the Closer does not need to be called (and will +// be nil). +func Dir(dir, prefix string) (string, io.Closer, error) { + // Create the temporary directory + td, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", nil, err + } + + return filepath.Join(td, "temp"), pathCloser(td), nil +} + +// pathCloser implements io.Closer to remove the given path on Close. +type pathCloser string + +// Close deletes this path. +func (p pathCloser) Close() error { + return os.RemoveAll(string(p)) +} diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml new file mode 100644 index 0000000000..769849071e --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml @@ -0,0 +1,12 @@ +language: go + +sudo: false + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +script: + - go test -bench . -benchmem -v ./... diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md index 21fdda4ada..fbde8b9aef 100644 --- a/vendor/github.com/hashicorp/go-uuid/README.md +++ b/vendor/github.com/hashicorp/go-uuid/README.md @@ -1,6 +1,6 @@ -# uuid +# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) -Generates UUID-format strings using purely high quality random bytes. +Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. 
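As a hedged illustration of the API this README describes, the sketch below round-trips a generated UUID string using the `GenerateUUID` and `ParseUUID` functions from the vendored `uuid.go` that follows in this patch; the printed value is a made-up example:

```go
package main

import (
	"fmt"
	"log"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	// GenerateUUID formats 16 purely random bytes as a UUID-shaped
	// string; it makes no RFC 4122 version/variant guarantees.
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id) // e.g. 0b3ad038-1fd8-4c85-ab4c-ac2341cd6d15 (illustrative)

	// ParseUUID recovers the underlying 16 bytes from the string form.
	raw, err := uuid.ParseUUID(id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d raw bytes\n", len(raw)) // 16
}
```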
Documentation ============= diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod new file mode 100644 index 0000000000..dd57f9d21a --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-uuid diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go index 322b522c23..911227f612 100644 --- a/vendor/github.com/hashicorp/go-uuid/uuid.go +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -6,22 +6,32 @@ import ( "fmt" ) -// GenerateUUID is used to generate a random UUID -func GenerateUUID() (string, error) { - buf := make([]byte, 16) +// GenerateRandomBytes is used to generate random bytes of given size. +func GenerateRandomBytes(size int) ([]byte, error) { + buf := make([]byte, size) if _, err := rand.Read(buf); err != nil { - return "", fmt.Errorf("failed to read random bytes: %v", err) + return nil, fmt.Errorf("failed to read random bytes: %v", err) } + return buf, nil +} +const uuidLen = 16 + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() (string, error) { + buf, err := GenerateRandomBytes(uuidLen) + if err != nil { + return "", err + } return FormatUUID(buf) } func FormatUUID(buf []byte) (string, error) { - if len(buf) != 16 { - return "", fmt.Errorf("wrong length byte slice (%d)", len(buf)) + if buflen := len(buf); buflen != uuidLen { + return "", fmt.Errorf("wrong length byte slice (%d)", buflen) } - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + return fmt.Sprintf("%x-%x-%x-%x-%x", buf[0:4], buf[4:6], buf[6:8], @@ -30,16 +40,14 @@ func FormatUUID(buf []byte) (string, error) { } func ParseUUID(uuid string) ([]byte, error) { - if len(uuid) != 36 { + if len(uuid) != 2 * uuidLen + 4 { return nil, fmt.Errorf("uuid string is wrong length") } - hyph := []byte("-") - - if uuid[8] != hyph[0] || - uuid[13] != hyph[0] || - uuid[18] != hyph[0] || - uuid[23] != hyph[0] { + if uuid[8] != '-' || + uuid[13] != '-' || + uuid[18] != '-' || + uuid[23] != '-' { return nil, fmt.Errorf("uuid is improperly formatted") } @@ -49,7 +57,7 @@ func ParseUUID(uuid string) ([]byte, error) { if err != nil { return nil, err } - if len(ret) != 16 { + if len(ret) != uuidLen { return nil, fmt.Errorf("decoded hex is the wrong length") } diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml index 9f30eecd73..542ca8b7f3 100644 --- a/vendor/github.com/hashicorp/go-version/.travis.yml +++ b/vendor/github.com/hashicorp/go-version/.travis.yml @@ -6,6 +6,8 @@ go: - 1.2 - 1.3 - 1.4 + - 1.9 + - "1.10" script: - go test diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index 8c73df0602..d055759611 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -2,6 +2,7 @@ package version import ( "fmt" + "reflect" "regexp" "strings" ) @@ -113,6 +114,26 @@ func parseSingle(v string) (*Constraint, error) { }, nil } +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. 
+ return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + //------------------------------------------------------------------- // Constraint functions //------------------------------------------------------------------- @@ -126,22 +147,27 @@ func constraintNotEqual(v, c *Version) bool { } func constraintGreaterThan(v, c *Version) bool { - return v.Compare(c) == 1 + return prereleaseCheck(v, c) && v.Compare(c) == 1 } func constraintLessThan(v, c *Version) bool { - return v.Compare(c) == -1 + return prereleaseCheck(v, c) && v.Compare(c) == -1 } func constraintGreaterThanEqual(v, c *Version) bool { - return v.Compare(c) >= 0 + return prereleaseCheck(v, c) && v.Compare(c) >= 0 } func constraintLessThanEqual(v, c *Version) bool { - return v.Compare(c) <= 0 + return prereleaseCheck(v, c) && v.Compare(c) <= 0 } func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + // If the version being checked is naturally less than the constraint, then there // is no way for the version to be valid against the constraint if v.LessThan(c) { diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod new file mode 100644 index 0000000000..f5285555fa --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index ae2f6b63a7..186fd7cc1f 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -10,14 +10,25 @@ import ( ) // The compiled regular expression used to test the validity of a version. -var versionRegexp *regexp.Regexp +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) // The raw regular expression string used for testing the validity // of a version. -const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `?` +const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) // Version represents a single version. type Version struct { @@ -25,16 +36,29 @@ type Version struct { pre string segments []int64 si int + original string } func init() { versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") } // NewVersion parses the given version and returns a new // Version. 
func NewVersion(v string) (*Version, error) { - matches := versionRegexp.FindStringSubmatch(v) + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) if matches == nil { return nil, fmt.Errorf("Malformed version: %s", v) } @@ -59,11 +83,17 @@ func NewVersion(v string) (*Version, error) { segments = append(segments, 0) } + pre := matches[7] + if pre == "" { + pre = matches[4] + } + return &Version{ - metadata: matches[7], - pre: matches[4], + metadata: matches[10], + pre: pre, segments: segments, si: si, + original: v, }, nil } @@ -166,24 +196,42 @@ func comparePart(preSelf string, preOther string) int { return 0 } + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + // if a part is empty, we use the other to decide if preSelf == "" { - _, notIsNumeric := strconv.ParseInt(preOther, 10, 64) - if notIsNumeric == nil { + if otherNumeric { return -1 } return 1 } if preOther == "" { - _, notIsNumeric := strconv.ParseInt(preSelf, 10, 64) - if notIsNumeric == nil { + if selfNumeric { return 1 } return -1 } - if preSelf > preOther { + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { return 1 } @@ -283,11 +331,19 @@ func (v *Version) Segments() []int { // for a version "1.2.3-beta", segments will return a slice of // 1, 2, 3. func (v *Version) Segments64() []int64 { - return v.segments + result := make([]int64, len(v.segments)) + copy(result, v.segments) + return result } // String returns the full version string included pre-release // and metadata information. +// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. func (v *Version) String() string { var buf bytes.Buffer fmtParts := make([]string, len(v.segments)) @@ -306,3 +362,9 @@ func (v *Version) String() string { return buf.String() } + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md b/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md new file mode 100644 index 0000000000..2b24fdbe87 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md @@ -0,0 +1,184 @@ +# HCL Dynamic Blocks Extension + +This HCL extension implements a special block type named "dynamic" that can +be used to dynamically generate blocks of other types by iterating over +collection values. + +Normally the block structure in an HCL configuration file is rigid, even +though dynamic expressions can be used within attribute values. 
This is
+convenient for most applications since it allows the overall structure of
+the document to be decoded easily, but in some applications it is desirable
+to allow dynamic block generation within certain portions of the configuration.
+
+Dynamic block generation is performed using the `dynamic` block type:
+
+```hcl
+toplevel {
+  nested {
+    foo = "static block 1"
+  }
+
+  dynamic "nested" {
+    for_each = ["a", "b", "c"]
+    iterator = nested
+    content {
+      foo = "dynamic block ${nested.value}"
+    }
+  }
+
+  nested {
+    foo = "static block 2"
+  }
+}
+```
+
+The above is interpreted as if it were written as follows:
+
+```hcl
+toplevel {
+  nested {
+    foo = "static block 1"
+  }
+
+  nested {
+    foo = "dynamic block a"
+  }
+
+  nested {
+    foo = "dynamic block b"
+  }
+
+  nested {
+    foo = "dynamic block c"
+  }
+
+  nested {
+    foo = "static block 2"
+  }
+}
+```
+
+Since HCL block syntax is not normally exposed to the possibility of unknown
+values, this extension must make some compromises when asked to iterate over
+an unknown collection. If the length of the collection cannot be statically
+recognized (because it is an unknown value of list, map, or set type) then
+the `dynamic` construct will generate a _single_ dynamic block whose iterator
+key and value are both unknown values of the dynamic pseudo-type, thus causing
+any attribute values derived from iteration to appear as unknown values. There
+is no explicit representation of the fact that the length of the collection may
+eventually be different than one.
+
+## Usage
+
+Pass a body to function `Expand` to obtain a new body that will, on access
+to its content, evaluate and expand any nested `dynamic` blocks.
+Dynamic block processing is also automatically propagated into any nested
+blocks that are returned, allowing users to nest dynamic blocks inside
+one another and to nest dynamic blocks inside other static blocks.
+
+HCL structural decoding does not normally have access to an `EvalContext`, so
+any variables and functions that should be available to the `for_each`
+and `labels` expressions must be passed in when calling `Expand`. Expressions
+within the `content` block are evaluated separately and so can be passed a
+separate `EvalContext` if desired, during normal attribute expression
+evaluation.
+
+## Detecting Variables
+
+Some applications dynamically generate an `EvalContext` by analyzing which
+variables are referenced by an expression before evaluating it.
+
+This unfortunately requires some extra effort when this analysis is required
+for the context passed to `Expand`: the HCL API requires a schema to be
+provided in order to do any analysis of the blocks in a body, but the low-level
+schema model provides a description of only one level of nested blocks at
+a time, and thus a new schema must be provided for each additional level of
+nesting.
+
+To make this arduous process as convenient as possible, this package provides
+a helper function `WalkVariables`, which returns a `WalkVariablesNode`
+instance that can be used to find variables directly in a given body and also
+determine which nested blocks require recursive calls. Using this mechanism
+requires that the caller be able to look up a schema given a nested block type.
+For _simple_ formats where a specific block type name always has the same schema regardless of context, a walk can be implemented as follows:
+
+```go
+func walkVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema) []hcl.Traversal {
+    vars, children := node.Visit(schema)
+
+    for _, child := range children {
+        var childSchema *hcl.BodySchema
+        switch child.BlockTypeName {
+        case "a":
+            childSchema = &hcl.BodySchema{
+                Blocks: []hcl.BlockHeaderSchema{
+                    {
+                        Type:       "b",
+                        LabelNames: []string{"key"},
+                    },
+                },
+            }
+        case "b":
+            childSchema = &hcl.BodySchema{
+                Attributes: []hcl.AttributeSchema{
+                    {
+                        Name:     "val",
+                        Required: true,
+                    },
+                },
+            }
+        default:
+            // Should never happen, because the above cases should be exhaustive
+            // for the application's configuration format.
+            panic(fmt.Errorf("can't find schema for unknown block type %q", child.BlockTypeName))
+        }
+
+        vars = append(vars, walkVariables(child.Node, childSchema)...)
+    }
+
+    return vars
+}
+```
+
+### Detecting Variables with `hcldec` Specifications
+
+For applications that use the higher-level `hcldec` package to decode nested
+configuration structures into `cty` values, the same specification can be used
+to automatically drive the recursive variable-detection walk described above.
+
+The helper function `VariablesHCLDec` allows an entire recursive
+configuration structure to be analyzed in a single call given a `hcldec.Spec`
+that describes the nested block structure. This means a `hcldec`-based
+application can support dynamic blocks with only a little additional effort:
+
+```go
+func decodeBody(body hcl.Body, spec hcldec.Spec) (cty.Value, hcl.Diagnostics) {
+    // Determine which variables are needed to expand dynamic blocks
+    neededForDynamic := dynblock.ExpandVariablesHCLDec(body, spec)
+
+    // Build a suitable EvalContext and expand dynamic blocks
+    dynCtx := buildEvalContext(neededForDynamic)
+    dynBody := dynblock.Expand(body, dynCtx)
+
+    // Determine which variables are needed to fully decode the expanded body
+    // This will analyze expressions that came both from static blocks in the
+    // original body and from blocks that were dynamically added by Expand.
+    neededForDecode := hcldec.Variables(dynBody, spec)
+
+    // Build a suitable EvalContext and then fully decode the body as per the
+    // hcldec specification.
+    decCtx := buildEvalContext(neededForDecode)
+    return hcldec.Decode(dynBody, spec, decCtx)
+}
+
+func buildEvalContext(needed []hcl.Traversal) *hcl.EvalContext {
+    // (to be implemented by your application)
+}
+```
+
+## Performance
+
+This extension goes quite harshly against the grain of the HCL API, and
+so it uses lots of wrapping objects and temporary data structures to get its
+work done. HCL in general is not suitable for use in high-performance situations
+or situations sensitive to memory pressure, but that is _especially_ true for
+this extension.
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go
new file mode 100644
index 0000000000..97d1e4dd5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go
@@ -0,0 +1,252 @@
+package dynblock
+
+import (
+    "fmt"
+
+    "github.com/hashicorp/hcl2/hcl"
+    "github.com/zclconf/go-cty/cty"
+)
+
+// expandBody wraps another hcl.Body and expands any "dynamic" blocks found
+// inside whenever Content or PartialContent is called.
+type expandBody struct {
+    original   hcl.Body
+    forEachCtx *hcl.EvalContext
+    iteration  *iteration // non-nil if we're nested inside another "dynamic" block
+
+    // These are used with PartialContent to produce a "remaining items"
+    // body to return. They are nil on all bodies fresh out of the transformer.
+    //
+    // Note that this is re-implemented here rather than delegating to the
+    // existing support required by the underlying body because we need to
+    // retain access to the entire original body on subsequent decode operations
+    // so we can retain any "dynamic" blocks for types we didn't consume
+    // on the first pass.
+    hiddenAttrs  map[string]struct{}
+    hiddenBlocks map[string]hcl.BlockHeaderSchema
+}
+
+func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+    extSchema := b.extendSchema(schema)
+    rawContent, diags := b.original.Content(extSchema)
+
+    blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false)
+    diags = append(diags, blockDiags...)
+    attrs := b.prepareAttributes(rawContent.Attributes)
+
+    content := &hcl.BodyContent{
+        Attributes:       attrs,
+        Blocks:           blocks,
+        MissingItemRange: b.original.MissingItemRange(),
+    }
+
+    return content, diags
+}
+
+func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
+    extSchema := b.extendSchema(schema)
+    rawContent, _, diags := b.original.PartialContent(extSchema)
+    // We discard the "remain" argument above because we're going to construct
+    // our own remain that also takes into account remaining "dynamic" blocks.
+
+    blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true)
+    diags = append(diags, blockDiags...)
+    attrs := b.prepareAttributes(rawContent.Attributes)
+
+    content := &hcl.BodyContent{
+        Attributes:       attrs,
+        Blocks:           blocks,
+        MissingItemRange: b.original.MissingItemRange(),
+    }
+
+    remain := &expandBody{
+        original:     b.original,
+        forEachCtx:   b.forEachCtx,
+        iteration:    b.iteration,
+        hiddenAttrs:  make(map[string]struct{}),
+        hiddenBlocks: make(map[string]hcl.BlockHeaderSchema),
+    }
+    for name := range b.hiddenAttrs {
+        remain.hiddenAttrs[name] = struct{}{}
+    }
+    for typeName, blockS := range b.hiddenBlocks {
+        remain.hiddenBlocks[typeName] = blockS
+    }
+    for _, attrS := range schema.Attributes {
+        remain.hiddenAttrs[attrS.Name] = struct{}{}
+    }
+    for _, blockS := range schema.Blocks {
+        remain.hiddenBlocks[blockS.Type] = blockS
+    }
+
+    return content, remain, diags
+}
+
+func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema {
+    // We augment the requested schema to also include our special "dynamic"
+    // block type, since then we'll get instances of it interleaved with
+    // all of the literal child blocks we must also include.
+    extSchema := &hcl.BodySchema{
+        Attributes: schema.Attributes,
+        Blocks:     make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1),
+    }
+    copy(extSchema.Blocks, schema.Blocks)
+    extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema)
+
+    // If we have any hiddenBlocks then we also need to register those here
+    // so that a call to "Content" on the underlying body won't fail.
+    // (We'll filter these out again once we process the result of either
+    // Content or PartialContent.)
+    for _, blockS := range b.hiddenBlocks {
+        extSchema.Blocks = append(extSchema.Blocks, blockS)
+    }
+
+    // If we have any hiddenAttrs then we also need to register these, for
+    // the same reason as we deal with hiddenBlocks above.
+ if len(b.hiddenAttrs) != 0 { + newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs)) + copy(newAttrs, extSchema.Attributes) + for name := range b.hiddenAttrs { + newAttrs = append(newAttrs, hcl.AttributeSchema{ + Name: name, + Required: false, + }) + } + extSchema.Attributes = newAttrs + } + + return extSchema +} + +func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes { + if len(b.hiddenAttrs) == 0 && b.iteration == nil { + // Easy path: just pass through the attrs from the original body verbatim + return rawAttrs + } + + // Otherwise we have some work to do: we must filter out any attributes + // that are hidden (since a previous PartialContent call already saw these) + // and wrap the expressions of the inner attributes so that they will + // have access to our iteration variables. + attrs := make(hcl.Attributes, len(rawAttrs)) + for name, rawAttr := range rawAttrs { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + if b.iteration != nil { + attr := *rawAttr // shallow copy so we can mutate it + attr.Expr = exprWrap{ + Expression: attr.Expr, + i: b.iteration, + } + attrs[name] = &attr + } else { + // If we have no active iteration then no wrapping is required. + attrs[name] = rawAttr + } + } + return attrs +} + +func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) { + var blocks hcl.Blocks + var diags hcl.Diagnostics + + for _, rawBlock := range rawBlocks { + switch rawBlock.Type { + case "dynamic": + realBlockType := rawBlock.Labels[0] + if _, hidden := b.hiddenBlocks[realBlockType]; hidden { + continue + } + + var blockS *hcl.BlockHeaderSchema + for _, candidate := range schema.Blocks { + if candidate.Type == realBlockType { + blockS = &candidate + break + } + } + if blockS == nil { + // Not a block type that the caller requested. + if !partial { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType), + Subject: &rawBlock.LabelRanges[0], + }) + } + continue + } + + spec, specDiags := b.decodeSpec(blockS, rawBlock) + diags = append(diags, specDiags...) + if specDiags.HasErrors() { + continue + } + + if spec.forEachVal.IsKnown() { + for it := spec.forEachVal.ElementIterator(); it.Next(); { + key, value := it.Element() + i := b.iteration.MakeChild(spec.iteratorName, key, value) + + block, blockDiags := spec.newBlock(i, b.forEachCtx) + diags = append(diags, blockDiags...) + if block != nil { + // Attach our new iteration context so that attributes + // and other nested blocks can refer to our iterator. + block.Body = b.expandChild(block.Body, i) + blocks = append(blocks, block) + } + } + } else { + // If our top-level iteration value isn't known then we're forced + // to compromise since HCL doesn't have any concept of an + // "unknown block". In this case then, we'll produce a single + // dynamic block with the iterator values set to DynamicVal, + // which at least makes the potential for a block visible + // in our result, even though it's not represented in a fully-accurate + // way. + i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal) + block, blockDiags := spec.newBlock(i, b.forEachCtx) + diags = append(diags, blockDiags...) 
+ if block != nil { + block.Body = b.expandChild(block.Body, i) + blocks = append(blocks, block) + } + } + + default: + if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden { + // A static block doesn't create a new iteration context, but + // it does need to inherit _our own_ iteration context in + // case it contains expressions that refer to our inherited + // iterators, or nested "dynamic" blocks. + expandedBlock := *rawBlock // shallow copy + expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration) + blocks = append(blocks, &expandedBlock) + } + } + } + + return blocks, diags +} + +func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body { + chiCtx := i.EvalContext(b.forEachCtx) + ret := Expand(child, chiCtx) + ret.(*expandBody).iteration = i + return ret +} + +func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + // blocks aren't allowed in JustAttributes mode and this body can + // only produce blocks, so we'll just pass straight through to our + // underlying body here. + return b.original.JustAttributes() +} + +func (b *expandBody) MissingItemRange() hcl.Range { + return b.original.MissingItemRange() +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go new file mode 100644 index 0000000000..41c0be267a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go @@ -0,0 +1,215 @@ +package dynblock + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type expandSpec struct { + blockType string + blockTypeRange hcl.Range + defRange hcl.Range + forEachVal cty.Value + iteratorName string + labelExprs []hcl.Expression + contentBody hcl.Body + inherited map[string]*iteration +} + +func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var schema *hcl.BodySchema + if len(blockS.LabelNames) != 0 { + schema = dynamicBlockBodySchemaLabels + } else { + schema = dynamicBlockBodySchemaNoLabels + } + + specContent, specDiags := rawSpec.Body.Content(schema) + diags = append(diags, specDiags...) + if specDiags.HasErrors() { + return nil, diags + } + + //// for_each attribute + + eachAttr := specContent.Attributes["for_each"] + eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx) + diags = append(diags, eachDiags...) + + if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType { + // We skip this error for DynamicPseudoType because that means we either + // have a null (which is checked immediately below) or an unknown + // (which is handled in the expandBody Content methods). + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic for_each value", + Detail: fmt.Sprintf("Cannot use a %s value in for_each. 
An iterable collection is required.", eachVal.Type().FriendlyName()), + Subject: eachAttr.Expr.Range().Ptr(), + Expression: eachAttr.Expr, + EvalContext: b.forEachCtx, + }) + return nil, diags + } + if eachVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic for_each value", + Detail: "Cannot use a null value in for_each.", + Subject: eachAttr.Expr.Range().Ptr(), + Expression: eachAttr.Expr, + EvalContext: b.forEachCtx, + }) + return nil, diags + } + + //// iterator attribute + + iteratorName := blockS.Type + if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil { + itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr) + diags = append(diags, itDiags...) + if itDiags.HasErrors() { + return nil, diags + } + + if len(itTraversal) != 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic iterator name", + Detail: "Dynamic iterator must be a single variable name.", + Subject: itTraversal.SourceRange().Ptr(), + }) + return nil, diags + } + + iteratorName = itTraversal.RootName() + } + + var labelExprs []hcl.Expression + if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil { + var labelDiags hcl.Diagnostics + labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr) + diags = append(diags, labelDiags...) + if labelDiags.HasErrors() { + return nil, diags + } + + if len(labelExprs) > len(blockS.LabelNames) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous dynamic block label", + Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), + Subject: labelExprs[len(blockS.LabelNames)].Range().Ptr(), + }) + return nil, diags + } else if len(labelExprs) < len(blockS.LabelNames) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Insufficient dynamic block labels", + Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), + Subject: labelsAttr.Expr.Range().Ptr(), + }) + return nil, diags + } + } + + // Since our schema requests only blocks of type "content", we can assume + // that all entries in specContent.Blocks are content blocks. + if len(specContent.Blocks) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing dynamic content block", + Detail: "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.", + Subject: &specContent.MissingItemRange, + }) + return nil, diags + } + if len(specContent.Blocks) > 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous dynamic content block", + Detail: "Only one nested content block is allowed for each dynamic block.", + Subject: &specContent.Blocks[1].DefRange, + }) + return nil, diags + } + + return &expandSpec{ + blockType: blockS.Type, + blockTypeRange: rawSpec.LabelRanges[0], + defRange: rawSpec.DefRange, + forEachVal: eachVal, + iteratorName: iteratorName, + labelExprs: labelExprs, + contentBody: specContent.Blocks[0].Body, + }, diags +} + +func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) { + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + lCtx := i.EvalContext(ctx) + for _, labelExpr := range s.labelExprs { + labelVal, labelDiags := labelExpr.Value(lCtx) + diags = append(diags, labelDiags...) 
+ if labelDiags.HasErrors() { + return nil, diags + } + + var convErr error + labelVal, convErr = convert.Convert(labelVal, cty.String) + if convErr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr), + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + if labelVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: "Cannot use a null value as a dynamic block label.", + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + if !labelVal.IsKnown() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: "This value is not yet known. Dynamic block labels must be immediately-known values.", + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + + labels = append(labels, labelVal.AsString()) + labelRanges = append(labelRanges, labelExpr.Range()) + } + + block := &hcl.Block{ + Type: s.blockType, + TypeRange: s.blockTypeRange, + Labels: labels, + LabelRanges: labelRanges, + DefRange: s.defRange, + Body: s.contentBody, + } + + return block, diags +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go new file mode 100644 index 0000000000..6916fc1580 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go @@ -0,0 +1,42 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +type exprWrap struct { + hcl.Expression + i *iteration +} + +func (e exprWrap) Variables() []hcl.Traversal { + raw := e.Expression.Variables() + ret := make([]hcl.Traversal, 0, len(raw)) + + // Filter out traversals that refer to our iterator name or any + // iterator we've inherited; we're going to provide those in + // our Value wrapper, so the caller doesn't need to know about them. + for _, traversal := range raw { + rootName := traversal.RootName() + if rootName == e.i.IteratorName { + continue + } + if _, inherited := e.i.Inherited[rootName]; inherited { + continue + } + ret = append(ret, traversal) + } + return ret +} + +func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + extCtx := e.i.EvalContext(ctx) + return e.Expression.Value(extCtx) +} + +// UnwrapExpression returns the expression being wrapped by this instance. +// This allows the original expression to be recovered by hcl.UnwrapExpression. 
+func (e exprWrap) UnwrapExpression() hcl.Expression { + return e.Expression +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go new file mode 100644 index 0000000000..7056d33606 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go @@ -0,0 +1,66 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +type iteration struct { + IteratorName string + Key cty.Value + Value cty.Value + Inherited map[string]*iteration +} + +func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration { + return &iteration{ + IteratorName: s.iteratorName, + Key: key, + Value: value, + Inherited: s.inherited, + } +} + +func (i *iteration) Object() cty.Value { + return cty.ObjectVal(map[string]cty.Value{ + "key": i.Key, + "value": i.Value, + }) +} + +func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext { + new := base.NewChild() + + if i != nil { + new.Variables = map[string]cty.Value{} + for name, otherIt := range i.Inherited { + new.Variables[name] = otherIt.Object() + } + new.Variables[i.IteratorName] = i.Object() + } + + return new +} + +func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration { + if i == nil { + // Create entirely new root iteration, then + return &iteration{ + IteratorName: iteratorName, + Key: key, + Value: value, + } + } + + inherited := map[string]*iteration{} + for name, otherIt := range i.Inherited { + inherited[name] = otherIt + } + inherited[i.IteratorName] = i + return &iteration{ + IteratorName: iteratorName, + Key: key, + Value: value, + Inherited: inherited, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go new file mode 100644 index 0000000000..b7e8ca9517 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go @@ -0,0 +1,44 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Expand "dynamic" blocks in the given body, returning a new body that +// has those blocks expanded. +// +// The given EvalContext is used when evaluating "for_each" and "labels" +// attributes within dynamic blocks, allowing those expressions access to +// variables and functions beyond the iterator variable created by the +// iteration. +// +// Expand returns no diagnostics because no blocks are actually expanded +// until a call to Content or PartialContent on the returned body, which +// will then expand only the blocks selected by the schema. +// +// "dynamic" blocks are also expanded automatically within nested blocks +// in the given body, including within other dynamic blocks, thus allowing +// multi-dimensional iteration. However, it is not possible to +// dynamically-generate the "dynamic" blocks themselves except through nesting. 
+// +// parent { +// dynamic "child" { +// for_each = child_objs +// content { +// dynamic "grandchild" { +// for_each = child.value.children +// labels = [grandchild.key] +// content { +// parent_key = child.key +// value = grandchild.value +// } +// } +// } +// } +// } +func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body { + return &expandBody{ + original: body, + forEachCtx: ctx, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go new file mode 100644 index 0000000000..dc8ed5a2f4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go @@ -0,0 +1,50 @@ +package dynblock + +import "github.com/hashicorp/hcl2/hcl" + +var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{ + Type: "dynamic", + LabelNames: []string{"type"}, +} + +var dynamicBlockBodySchemaLabels = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "for_each", + Required: true, + }, + { + Name: "iterator", + Required: false, + }, + { + Name: "labels", + Required: true, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "content", + LabelNames: nil, + }, + }, +} + +var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "for_each", + Required: true, + }, + { + Name: "iterator", + Required: false, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "content", + LabelNames: nil, + }, + }, +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go new file mode 100644 index 0000000000..ad838f3e87 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go @@ -0,0 +1,209 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// WalkVariables begins the recursive process of walking all expressions and +// nested blocks in the given body and its child bodies while taking into +// account any "dynamic" blocks. +// +// This function requires that the caller walk through the nested block +// structure in the given body level-by-level so that an appropriate schema +// can be provided at each level to inform further processing. This workflow +// is thus easiest to use for calling applications that have some higher-level +// schema representation available with which to drive this multi-step +// process. If your application uses the hcldec package, you may be able to +// use VariablesHCLDec instead for a more automatic approach. +func WalkVariables(body hcl.Body) WalkVariablesNode { + return WalkVariablesNode{ + body: body, + includeContent: true, + } +} + +// WalkExpandVariables is like Variables but it includes only the variables +// required for successful block expansion, ignoring any variables referenced +// inside block contents. The result is the minimal set of all variables +// required for a call to Expand, excluding variables that would only be +// needed to subsequently call Content or PartialContent on the expanded +// body. +func WalkExpandVariables(body hcl.Body) WalkVariablesNode { + return WalkVariablesNode{ + body: body, + } +} + +type WalkVariablesNode struct { + body hcl.Body + it *iteration + + includeContent bool +} + +type WalkVariablesChild struct { + BlockTypeName string + Node WalkVariablesNode +} + +// Body returns the HCL Body associated with the child node, in case the caller +// wants to do some sort of inspection of it in order to decide what schema +// to pass to Visit. 
+// +// Most implementations should just fetch a fixed schema based on the +// BlockTypeName field and not access this. Deciding on a schema dynamically +// based on the body is a strange thing to do and generally necessary only if +// your caller is already doing other bizarre things with HCL bodies. +func (c WalkVariablesChild) Body() hcl.Body { + return c.Node.body +} + +// Visit returns the variable traversals required for any "dynamic" blocks +// directly in the body associated with this node, and also returns any child +// nodes that must be visited in order to continue the walk. +// +// Each child node has its associated block type name given in its BlockTypeName +// field, which the calling application should use to determine the appropriate +// schema for the content of each child node and pass it to the child node's +// own Visit method to continue the walk recursively. +func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) { + extSchema := n.extendSchema(schema) + container, _, _ := n.body.PartialContent(extSchema) + if container == nil { + return vars, children + } + + children = make([]WalkVariablesChild, 0, len(container.Blocks)) + + if n.includeContent { + for _, attr := range container.Attributes { + for _, traversal := range attr.Expr.Variables() { + var ours, inherited bool + if n.it != nil { + ours = traversal.RootName() == n.it.IteratorName + _, inherited = n.it.Inherited[traversal.RootName()] + } + + if !(ours || inherited) { + vars = append(vars, traversal) + } + } + } + } + + for _, block := range container.Blocks { + switch block.Type { + + case "dynamic": + blockTypeName := block.Labels[0] + inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema) + if inner == nil { + continue + } + + iteratorName := blockTypeName + if attr, exists := inner.Attributes["iterator"]; exists { + iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr) + if len(iterTraversal) == 0 { + // Ignore this invalid dynamic block, since it'll produce + // an error if someone tries to extract content from it + // later anyway. + continue + } + iteratorName = iterTraversal.RootName() + } + blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal) + + if attr, exists := inner.Attributes["for_each"]; exists { + // Filter out iterator names inherited from parent blocks + for _, traversal := range attr.Expr.Variables() { + if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited { + vars = append(vars, traversal) + } + } + } + if attr, exists := inner.Attributes["labels"]; exists { + // Filter out both our own iterator name _and_ those inherited + // from parent blocks, since we provide _both_ of these to the + // label expressions. + for _, traversal := range attr.Expr.Variables() { + ours := traversal.RootName() == iteratorName + _, inherited := blockIt.Inherited[traversal.RootName()] + + if !(ours || inherited) { + vars = append(vars, traversal) + } + } + } + + for _, contentBlock := range inner.Blocks { + // We only request "content" blocks in our schema, so we know + // any blocks we find here will be content blocks. We require + // exactly one content block for actual expansion, but we'll + // be more liberal here so that callers can still collect + // variables from erroneous "dynamic" blocks. 
+ children = append(children, WalkVariablesChild{ + BlockTypeName: blockTypeName, + Node: WalkVariablesNode{ + body: contentBlock.Body, + it: blockIt, + includeContent: n.includeContent, + }, + }) + } + + default: + children = append(children, WalkVariablesChild{ + BlockTypeName: block.Type, + Node: WalkVariablesNode{ + body: block.Body, + it: n.it, + includeContent: n.includeContent, + }, + }) + + } + } + + return vars, children +} + +func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { + // We augment the requested schema to also include our special "dynamic" + // block type, since then we'll get instances of it interleaved with + // all of the literal child blocks we must also include. + extSchema := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), + } + copy(extSchema.Blocks, schema.Blocks) + extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) + + return extSchema +} + +// This is a more relaxed schema than what's in schema.go, since we +// want to maximize the amount of variables we can find even if there +// are erroneous blocks. +var variableDetectionInnerSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "for_each", + Required: false, + }, + { + Name: "labels", + Required: false, + }, + { + Name: "iterator", + Required: false, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "content", + }, + }, +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go new file mode 100644 index 0000000000..a078d915c0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go @@ -0,0 +1,43 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" +) + +// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec +// specification to automatically drive the recursive walk through nested +// blocks in the given body. +// +// This is a drop-in replacement for hcldec.Variables which is able to treat +// blocks of type "dynamic" in the same special way that dynblock.Expand would, +// exposing both the variables referenced in the "for_each" and "labels" +// arguments and variables used in the nested "content" block. +func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { + rootNode := WalkVariables(body) + return walkVariablesWithHCLDec(rootNode, spec) +} + +// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the +// minimal set of variables required to call Expand, ignoring variables that +// are referenced only inside normal block contents. See WalkExpandVariables +// for more information. +func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { + rootNode := WalkExpandVariables(body) + return walkVariablesWithHCLDec(rootNode, spec) +} + +func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal { + vars, children := node.Visit(hcldec.ImpliedSchema(spec)) + + if len(children) > 0 { + childSpecs := hcldec.ChildBlockTypes(spec) + for _, child := range children { + if childSpec, exists := childSpecs[child.BlockTypeName]; exists { + vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...) 
+ } + } + } + + return vars +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md new file mode 100644 index 0000000000..ff2b3f2295 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md @@ -0,0 +1,67 @@ +# HCL Type Expressions Extension + +This HCL extension defines a convention for describing HCL types using function +call and variable reference syntax, allowing configuration formats to include +type information provided by users. + +The type syntax is processed statically from a hcl.Expression, so it cannot +use any of the usual language operators. This is similar to type expressions +in statically-typed programming languages. + +```hcl +variable "example" { + type = list(string) +} +``` + +The extension is built using the `hcl.ExprAsKeyword` and `hcl.ExprCall` +functions, and so it relies on the underlying syntax to define how "keyword" +and "call" are interpreted. The above shows how they are interpreted in +the HCL native syntax, while the following shows the same information +expressed in JSON: + +```json +{ + "variable": { + "example": { + "type": "list(string)" + } + } +} +``` + +Notice that since we have additional contextual information that we intend +to allow only calls and keywords, the JSON syntax is able to parse the given +string directly as an expression, rather than as a template as would be +the case for normal expression evaluation. + +For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl2/ext/typeexpr). + +## Type Expression Syntax + +When expressed in the native syntax, the following expressions are permitted +in a type expression: + +* `string` - string +* `bool` - boolean +* `number` - number +* `any` - `cty.DynamicPseudoType` (in function `TypeConstraint` only) +* `list(<TYPE>)` - list of the type given as an argument +* `set(<TYPE>)` - set of the type given as an argument +* `map(<TYPE>)` - map of the type given as an argument +* `tuple([<TYPES>])` - tuple with the element types given in the single list argument +* `object({<KEY> = <TYPE>, ...})` - object with the attributes and corresponding types given in the single map argument + +For example: + +* `list(string)` +* `object({name=string,age=number})` +* `map(object({name=string,age=number}))` + +Note that the object constructor syntax is not fully-general for all possible +object types because it requires the attribute names to be valid identifiers. +In practice it is expected that any time an object type is being fixed for +type checking it will be one that has identifiers as its attributes; object +types with weird attributes generally show up only from arbitrary object +constructors in configuration files, which are usually treated either as maps +or as the dynamic pseudo-type. diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go new file mode 100644 index 0000000000..c4b379579d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go @@ -0,0 +1,11 @@ +// Package typeexpr extends HCL with a convention for describing HCL types +// within configuration files. +// +// The type syntax is processed statically from a hcl.Expression, so it cannot +// use any of the usual language operators. This is similar to type expressions +// in statically-typed programming languages.
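+// +// For example, a calling application might accept a type constraint +// argument in its configuration like this: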
+// +// variable "example" { +// type = list(string) +// } +package typeexpr diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go new file mode 100644 index 0000000000..a84338a85a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go @@ -0,0 +1,196 @@ +package typeexpr + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +const invalidTypeSummary = "Invalid type specification" + +// getType is the internal implementation of both Type and TypeConstraint, +// using the passed flag to distinguish. When constraint is false, the "any" +// keyword will produce an error. +func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) { + // First we'll try for one of our keywords + kw := hcl.ExprAsKeyword(expr) + switch kw { + case "bool": + return cty.Bool, nil + case "string": + return cty.String, nil + case "number": + return cty.Number, nil + case "any": + if constraint { + return cty.DynamicPseudoType, nil + } + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw), + Subject: expr.Range().Ptr(), + }} + case "list", "map", "set": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw), + Subject: expr.Range().Ptr(), + }} + case "object": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", + Subject: expr.Range().Ptr(), + }} + case "tuple": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The tuple type constructor requires one argument specifying the element types as a list.", + Subject: expr.Range().Ptr(), + }} + case "": + // okay! we'll fall through and try processing as a call, then. + default: + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The keyword %q is not a valid type specification.", kw), + Subject: expr.Range().Ptr(), + }} + } + + // If we get down here then our expression isn't just a keyword, so we'll + // try to process it as a call instead. + call, diags := hcl.ExprCall(expr) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).", + Subject: expr.Range().Ptr(), + }} + } + + switch call.Name { + case "bool", "string", "number", "any": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name), + Subject: &call.ArgsRange, + }} + } + + if len(call.Arguments) != 1 { + contextRange := call.ArgsRange + subjectRange := call.ArgsRange + if len(call.Arguments) > 1 { + // If we have too many arguments (as opposed to too _few_) then + // we'll highlight the extraneous arguments as the diagnostic + // subject. 
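+ // (hcl.RangeBetween spans from the start of its first argument's + // range to the end of its second, so this covers everything from + // the second argument through the last.)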
+ subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range()) + } + + switch call.Name { + case "list", "set", "map": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name), + Subject: &subjectRange, + Context: &contextRange, + }} + case "object": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", + Subject: &subjectRange, + Context: &contextRange, + }} + case "tuple": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The tuple type constructor requires one argument specifying the element types as a list.", + Subject: &subjectRange, + Context: &contextRange, + }} + } + } + + switch call.Name { + + case "list": + ety, diags := getType(call.Arguments[0], constraint) + return cty.List(ety), diags + case "set": + ety, diags := getType(call.Arguments[0], constraint) + return cty.Set(ety), diags + case "map": + ety, diags := getType(call.Arguments[0], constraint) + return cty.Map(ety), diags + case "object": + attrDefs, diags := hcl.ExprMap(call.Arguments[0]) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.", + Subject: call.Arguments[0].Range().Ptr(), + Context: expr.Range().Ptr(), + }} + } + + atys := make(map[string]cty.Type) + for _, attrDef := range attrDefs { + attrName := hcl.ExprAsKeyword(attrDef.Key) + if attrName == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Object constructor map keys must be attribute names.", + Subject: attrDef.Key.Range().Ptr(), + Context: expr.Range().Ptr(), + }) + continue + } + aty, attrDiags := getType(attrDef.Value, constraint) + diags = append(diags, attrDiags...) + atys[attrName] = aty + } + return cty.Object(atys), diags + case "tuple": + elemDefs, diags := hcl.ExprList(call.Arguments[0]) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Tuple type constructor requires a list of element types.", + Subject: call.Arguments[0].Range().Ptr(), + Context: expr.Range().Ptr(), + }} + } + etys := make([]cty.Type, len(elemDefs)) + for i, defExpr := range elemDefs { + ety, elemDiags := getType(defExpr, constraint) + diags = append(diags, elemDiags...) + etys[i] = ety + } + return cty.Tuple(etys), diags + default: + // Can't access call.Arguments in this path because we've not validated + // that it contains exactly one expression here. 
+ return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name), + Subject: expr.Range().Ptr(), + }} + } +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go new file mode 100644 index 0000000000..e3f5eef592 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go @@ -0,0 +1,129 @@ +package typeexpr + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// Type attempts to process the given expression as a type expression and, if +// successful, returns the resulting type. If unsuccessful, error diagnostics +// are returned. +func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) { + return getType(expr, false) +} + +// TypeConstraint attempts to parse the given expression as a type constraint +// and, if successful, returns the resulting type. If unsuccessful, error +// diagnostics are returned. +// +// A type constraint has the same structure as a type, but it additionally +// allows the keyword "any" to represent cty.DynamicPseudoType, which is often +// used as a wildcard in type checking and type conversion operations. +func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) { + return getType(expr, true) +} + +// TypeString returns a string rendering of the given type as it would be +// expected to appear in the HCL native syntax. +// +// This is primarily intended for showing types to the user in an application +// that uses typeexpr, where the user can be assumed to be familiar with the +// type expression syntax. In applications that do not use typeexpr these +// results may be confusing to the user and so type.FriendlyName may be +// preferable, even though it's less precise. +// +// TypeString produces reasonable results only for types like what would be +// produced by the Type and TypeConstraint functions. In particular, it cannot +// support capsule types. +func TypeString(ty cty.Type) string { + // Easy cases first + switch ty { + case cty.String: + return "string" + case cty.Bool: + return "bool" + case cty.Number: + return "number" + case cty.DynamicPseudoType: + return "any" + } + + if ty.IsCapsuleType() { + panic("TypeString does not support capsule types") + } + + if ty.IsCollectionType() { + ety := ty.ElementType() + etyString := TypeString(ety) + switch { + case ty.IsListType(): + return fmt.Sprintf("list(%s)", etyString) + case ty.IsSetType(): + return fmt.Sprintf("set(%s)", etyString) + case ty.IsMapType(): + return fmt.Sprintf("map(%s)", etyString) + default: + // Should never happen because the above is exhaustive + panic("unsupported collection type") + } + } + + if ty.IsObjectType() { + var buf bytes.Buffer + buf.WriteString("object({") + atys := ty.AttributeTypes() + names := make([]string, 0, len(atys)) + for name := range atys { + names = append(names, name) + } + sort.Strings(names) + first := true + for _, name := range names { + aty := atys[name] + if !first { + buf.WriteByte(',') + } + if !hclsyntax.ValidIdentifier(name) { + // Should never happen for any type produced by this package, + // but we'll do something reasonable here just so we don't + // produce garbage if someone gives us a hand-assembled object + // type that has weird attribute names.
+ // Using Go-style quoting here isn't perfect, since it doesn't + // exactly match HCL syntax, but it's fine for an edge-case. + buf.WriteString(fmt.Sprintf("%q", name)) + } else { + buf.WriteString(name) + } + buf.WriteByte('=') + buf.WriteString(TypeString(aty)) + first = false + } + buf.WriteString("})") + return buf.String() + } + + if ty.IsTupleType() { + var buf bytes.Buffer + buf.WriteString("tuple([") + etys := ty.TupleElementTypes() + first := true + for _, ety := range etys { + if !first { + buf.WriteByte(',') + } + buf.WriteString(TypeString(ety)) + first = false + } + buf.WriteString("])") + return buf.String() + } + + // Should never happen because we covered all cases above. + panic(fmt.Errorf("unsupported type %#v", ty)) +} diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go index 8500214bf0..aa3c6ea9ef 100644 --- a/vendor/github.com/hashicorp/hcl2/gohcl/doc.go +++ b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go @@ -40,6 +40,10 @@ // present then any attributes or blocks not matched by another valid tag // will cause an error diagnostic. // +// Only a subset of this tagging/typing vocabulary is supported for the +// "Encode" family of functions. See the EncodeIntoBody docs for full details +// on the constraints there. +// // Broadly-speaking this package deals with two types of error. The first is // errors in the configuration itself, which are returned as diagnostics // written with the configuration author as the target audience. The second diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/encode.go b/vendor/github.com/hashicorp/hcl2/gohcl/encode.go new file mode 100644 index 0000000000..3cbf7e48af --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/encode.go @@ -0,0 +1,191 @@ +package gohcl + +import ( + "fmt" + "reflect" + "sort" + + "github.com/hashicorp/hcl2/hclwrite" + "github.com/zclconf/go-cty/cty/gocty" +) + +// EncodeIntoBody replaces the contents of the given hclwrite Body with +// attributes and blocks derived from the given value, which must be a +// struct value or a pointer to a struct value with the struct tags defined +// in this package. +// +// This function can work only with fully-decoded data. It will ignore any +// fields tagged as "remain", any fields that decode attributes into either +// hcl.Attribute or hcl.Expression values, and any fields that decode blocks +// into hcl.Attributes values. This function does not have enough information +// to complete the decoding of these types. +// +// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock +// to produce a whole hclwrite.Block including block labels. +// +// As long as a suitable value is given to encode and the destination body +// is non-nil, this function will always complete. It will panic in case of +// any errors in the calling program, such as passing an inappropriate type +// or a nil body. +// +// The layout of the resulting HCL source is derived from the ordering of +// the struct fields, with blank lines around nested blocks of different types. +// Fields representing attributes should usually precede those representing +// blocks so that the attributes can group together in the result. For more +// control, use the hclwrite API directly.
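+// +// As a usage sketch (the ServiceConfig type and its field tags here are +// hypothetical, not part of this package): +// +// type ServiceConfig struct { +// Hostname string `hcl:"hostname"` +// Port int `hcl:"port"` +// } +// +// f := hclwrite.NewEmptyFile() +// gohcl.EncodeIntoBody(&ServiceConfig{Hostname: "example.com", Port: 443}, f.Body()) +// fmt.Printf("%s", f.Bytes())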
+func EncodeIntoBody(val interface{}, dst *hclwrite.Body) { + rv := reflect.ValueOf(val) + ty := rv.Type() + if ty.Kind() == reflect.Ptr { + rv = rv.Elem() + ty = rv.Type() + } + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) + } + + tags := getFieldTags(ty) + populateBody(rv, ty, tags, dst) +} + +// EncodeAsBlock creates a new hclwrite.Block populated with the data from +// the given value, which must be a struct or pointer to struct with the +// struct tags defined in this package. +// +// If the given struct type has fields tagged with "label" tags then they +// will be used in order to annotate the created block with labels. +// +// This function has the same constraints as EncodeIntoBody and will panic +// if they are violated. +func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block { + rv := reflect.ValueOf(val) + ty := rv.Type() + if ty.Kind() == reflect.Ptr { + rv = rv.Elem() + ty = rv.Type() + } + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) + } + + tags := getFieldTags(ty) + labels := make([]string, len(tags.Labels)) + for i, lf := range tags.Labels { + lv := rv.Field(lf.FieldIndex) + // We just stringify whatever we find. It should always be a string + // but if not then we'll still do something reasonable. + labels[i] = fmt.Sprintf("%s", lv.Interface()) + } + + block := hclwrite.NewBlock(blockType, labels) + populateBody(rv, ty, tags, block.Body()) + return block +} + +func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) { + nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks)) + namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks)) + for n, i := range tags.Attributes { + nameIdxs[n] = i + namesOrder = append(namesOrder, n) + } + for n, i := range tags.Blocks { + nameIdxs[n] = i + namesOrder = append(namesOrder, n) + } + sort.SliceStable(namesOrder, func(i, j int) bool { + ni, nj := namesOrder[i], namesOrder[j] + return nameIdxs[ni] < nameIdxs[nj] + }) + + dst.Clear() + + prevWasBlock := false + for _, name := range namesOrder { + fieldIdx := nameIdxs[name] + field := ty.Field(fieldIdx) + fieldTy := field.Type + fieldVal := rv.Field(fieldIdx) + + if fieldTy.Kind() == reflect.Ptr { + fieldTy = fieldTy.Elem() + fieldVal = fieldVal.Elem() + } + + if _, isAttr := tags.Attributes[name]; isAttr { + + if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) { + continue // ignore undecoded fields + } + if !fieldVal.IsValid() { + continue // ignore (field value is nil pointer) + } + if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() { + continue // ignore + } + if prevWasBlock { + dst.AppendNewline() + prevWasBlock = false + } + + valTy, err := gocty.ImpliedType(fieldVal.Interface()) + if err != nil { + panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err)) + } + + val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy) + if err != nil { + // This should never happen, since we should always be able + // to decode into the implied type. 
+ panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err)) + } + + dst.SetAttributeValue(name, val) + + } else { // must be a block, then + elemTy := fieldTy + isSeq := false + if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array { + isSeq = true + elemTy = elemTy.Elem() + } + + if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) { + continue // ignore undecoded fields + } + prevWasBlock = false + + if isSeq { + l := fieldVal.Len() + for i := 0; i < l; i++ { + elemVal := fieldVal.Index(i) + if !elemVal.IsValid() { + continue // ignore (elem value is nil pointer) + } + if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() { + continue // ignore + } + block := EncodeAsBlock(elemVal.Interface(), name) + if !prevWasBlock { + dst.AppendNewline() + prevWasBlock = true + } + dst.AppendBlock(block) + } + } else { + if !fieldVal.IsValid() { + continue // ignore (field value is nil pointer) + } + if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() { + continue // ignore + } + block := EncodeAsBlock(fieldVal.Interface(), name) + if !prevWasBlock { + dst.AppendNewline() + prevWasBlock = true + } + dst.AppendBlock(block) + } + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go index 6ecf7447f5..c320961e11 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go @@ -26,14 +26,43 @@ const ( type Diagnostic struct { Severity DiagnosticSeverity - // Summary and detail contain the English-language description of the + // Summary and Detail contain the English-language description of the // problem. Summary is a terse description of the general problem and // detail is a more elaborate, often-multi-sentence description of // the probem and what might be done to solve it. Summary string Detail string + + // Subject and Context are both source ranges relating to the diagnostic. + // + // Subject is a tight range referring to exactly the construct that + // is problematic, while Context is an optional broader range (which should + // fully contain Subject) that ought to be shown around Subject when + // generating isolated source-code snippets in diagnostic messages. + // If Context is nil, the Subject is also the Context. + // + // Some diagnostics have no source ranges at all. If Context is set then + // Subject should always also be set. Subject *Range Context *Range + + // For diagnostics that occur when evaluating an expression, Expression + // may refer to that expression and EvalContext may point to the + // EvalContext that was active when evaluating it. This may allow for the + // inclusion of additional useful information when rendering a diagnostic + // message to the user. + // + // It is not always possible to select a single EvalContext for a + // diagnostic, and so in some cases this field may be nil even when an + // expression causes a problem. + // + // EvalContexts form a tree, so the given EvalContext may refer to a parent + // which in turn refers to another parent, etc. For a full picture of all + // of the active variables and functions the caller must walk up this + // chain, preferring definitions that are "closer" to the expression in + // case of colliding names. + Expression Expression + EvalContext *EvalContext } // Diagnostics is a list of Diagnostic instances. 
@@ -96,6 +125,17 @@ func (d Diagnostics) HasErrors() bool { return false } +func (d Diagnostics) Errs() []error { + var errs []error + for _, diag := range d { + if diag.Severity == DiagError { + errs = append(errs, diag) + } + } + + return errs +} + // A DiagnosticWriter emits diagnostics somehow. type DiagnosticWriter interface { WriteDiagnostic(*Diagnostic) error diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go index dfa473add8..0b4a2629b9 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go @@ -2,11 +2,14 @@ package hcl import ( "bufio" + "bytes" "errors" "fmt" "io" + "sort" wordwrap "github.com/mitchellh/go-wordwrap" + "github.com/zclconf/go-cty/cty" ) type diagnosticTextWriter struct { @@ -133,6 +136,62 @@ func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { w.wr.Write([]byte{'\n'}) } + + if diag.Expression != nil && diag.EvalContext != nil { + // We will attempt to render the values for any variables + // referenced in the given expression as additional context, for + // situations where the same expression is evaluated multiple + // times in different scopes. + expr := diag.Expression + ctx := diag.EvalContext + + vars := expr.Variables() + stmts := make([]string, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + for _, traversal := range vars { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. + continue + } + + traversalStr := w.traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue // don't show duplicates when the same variable is referenced multiple times + } + switch { + case !val.IsKnown(): + // Can't say anything about this yet, then. + continue + case val.IsNull(): + stmts = append(stmts, fmt.Sprintf("%s set to null", traversalStr)) + default: + stmts = append(stmts, fmt.Sprintf("%s as %s", traversalStr, w.valueStr(val))) + } + seen[traversalStr] = struct{}{} + } + + sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? + last := len(stmts) - 1 + + for i, stmt := range stmts { + switch i { + case 0: + w.wr.Write([]byte{'w', 'i', 't', 'h', ' '}) + default: + w.wr.Write([]byte{' ', ' ', ' ', ' ', ' '}) + } + w.wr.Write([]byte(stmt)) + switch i { + case last: + w.wr.Write([]byte{'.', '\n', '\n'}) + default: + w.wr.Write([]byte{',', '\n'}) + } + } + } } if diag.Detail != "" { @@ -156,6 +215,90 @@ func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error { return nil } +func (w *diagnosticTextWriter) traversalStr(traversal Traversal) string { + // This is a specialized subset of traversal rendering tailored to + // producing helpful contextual messages in diagnostics. It is not + // comprehensive nor intended to be used for other purposes. + + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case TraverseRoot: + buf.WriteString(tStep.Name) + case TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case TraverseIndex: + buf.WriteByte('[') + if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { + buf.WriteString(w.valueStr(tStep.Key)) + } else { + // We'll just use a placeholder for more complex values, + // since otherwise our result could grow ridiculously long. 
+ buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +func (w *diagnosticTextWriter) valueStr(val cty.Value) string { + // This is a specialized subset of value rendering tailored to producing + // helpful but concise messages in diagnostics. It is not comprehensive + // nor intended to be used for other purposes. + + ty := val.Type() + switch { + case val.IsNull(): + return "null" + case !val.IsKnown(): + // Should never happen here because we should filter before we get + // in here, but we'll do something reasonable rather than panic. + return "(not yet known)" + case ty == cty.Bool: + if val.True() { + return "true" + } + return "false" + case ty == cty.Number: + bf := val.AsBigFloat() + return bf.Text('g', 10) + case ty == cty.String: + // Go string syntax is not exactly the same as HCL native string syntax, + // but we'll accept the minor edge-cases where this is different here + // for now, just to get something reasonable here. + return fmt.Sprintf("%q", val.AsString()) + case ty.IsCollectionType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + return "empty " + ty.FriendlyName() + case 1: + return ty.FriendlyName() + " with 1 element" + default: + return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) + } + case ty.IsObjectType(): + atys := ty.AttributeTypes() + l := len(atys) + switch l { + case 0: + return "object with no attributes" + case 1: + var name string + for k := range atys { + name = k + } + return fmt.Sprintf("object with 1 attribute %q", name) + default: + return fmt.Sprintf("object with %d attributes", l) + } + default: + return ty.FriendlyName() + } +} + func contextString(file *File, offset int) string { type contextStringer interface { ContextString(offset int) string diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go new file mode 100644 index 0000000000..6963fbae36 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go @@ -0,0 +1,46 @@ +package hcl + +// ExprCall tests if the given expression is a function call and, +// if so, extracts the function name and the expressions that represent +// the arguments. If the given expression is not statically a function call, +// error diagnostics are returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprCall that takes no arguments and returns +// *StaticCall. This method should return nil if a static call cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprCall(expr Expression) (*StaticCall, Diagnostics) { + type exprCall interface { + ExprCall() *StaticCall + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprCall) + return supported + }) + + if exC, supported := physExpr.(exprCall); supported { + if call := exC.ExprCall(); call != nil { + return call, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static function call is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// StaticCall represents a function call that was extracted statically from +// an expression using ExprCall. 
+type StaticCall struct { + Name string + NameRange Range + Arguments []Expression + ArgsRange Range +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go new file mode 100644 index 0000000000..94eaf58929 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go @@ -0,0 +1,23 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// setDiagEvalContext is an internal helper that will impose a particular +// EvalContext on a set of diagnostics in-place, for any diagnostic that +// does not already have an EvalContext set. +// +// We generally expect diagnostics to be immutable, but this is safe to use +// on any Diagnostics where none of the contained Diagnostic objects have yet +// been seen by a caller. Its purpose is to apply additional context to a +// set of diagnostics produced by a "deeper" component as the stack unwinds +// during expression evaluation. +func setDiagEvalContext(diags hcl.Diagnostics, expr hcl.Expression, ctx *hcl.EvalContext) { + for _, diag := range diags { + if diag.Expression == nil { + diag.Expression = expr + diag.EvalContext = ctx + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go index db4b779a80..26819a2daf 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go @@ -2,6 +2,7 @@ package hclsyntax import ( "fmt" + "sync" "github.com/hashicorp/hcl2/hcl" "github.com/zclconf/go-cty/cty" @@ -104,7 +105,9 @@ func (e *ScopeTraversalExpr) walkChildNodes(w internalWalkFunc) { } func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return e.Traversal.TraverseAbs(ctx) + val, diags := e.Traversal.TraverseAbs(ctx) + setDiagEvalContext(diags, e, ctx) + return val, diags } func (e *ScopeTraversalExpr) Range() hcl.Range { @@ -129,12 +132,13 @@ type RelativeTraversalExpr struct { } func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) { - // Scope traversals have no child nodes + w(e.Source) } func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { src, diags := e.Source.Value(ctx) ret, travDiags := e.Traversal.TraverseRel(src) + setDiagEvalContext(travDiags, e, ctx) diags = append(diags, travDiags...) 
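+ // Attaching this expression and its EvalContext to the traversal + // diagnostics (via setDiagEvalContext above) lets the diagnostic + // renderer show the values that were involved in a failed traversal.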
return ret, diags } @@ -177,8 +181,8 @@ type FunctionCallExpr struct { } func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) { - for i, arg := range e.Args { - e.Args[i] = w(arg).(Expression) + for _, arg := range e.Args { + w(arg) } } @@ -206,10 +210,12 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti if !hasNonNilMap { return cty.DynamicVal, hcl.Diagnostics{ { - Severity: hcl.DiagError, - Summary: "Function calls not allowed", - Detail: "Functions may not be called here.", - Subject: e.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Function calls not allowed", + Detail: "Functions may not be called here.", + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, }, } } @@ -225,11 +231,13 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti return cty.DynamicVal, hcl.Diagnostics{ { - Severity: hcl.DiagError, - Summary: "Call to unknown function", - Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion), - Subject: &e.NameRange, - Context: e.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Call to unknown function", + Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion), + Subject: &e.NameRange, + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, }, } } @@ -254,11 +262,13 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): if expandVal.IsNull() { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid expanding argument value", - Detail: "The expanding argument (indicated by ...) must not be null.", - Context: expandExpr.Range().Ptr(), - Subject: e.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must not be null.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, }) return cty.DynamicVal, diags } @@ -279,11 +289,13 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti args = newArgs default: diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid expanding argument value", - Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.", - Context: expandExpr.Range().Ptr(), - Subject: e.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, }) return cty.DynamicVal, diags } @@ -303,8 +315,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti "Function %q expects%s %d argument(s). 
Missing value for %q.", e.Name, qual, len(params), missing.Name, ), - Subject: &e.CloseParenRange, - Context: e.Range().Ptr(), + Subject: &e.CloseParenRange, + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, }, } } @@ -318,8 +332,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti "Function %q expects only %d argument(s).", e.Name, len(params), ), - Subject: args[len(params)].StartRange().Ptr(), - Context: e.Range().Ptr(), + Subject: args[len(params)].StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, }, } } @@ -349,8 +365,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti "Invalid value for %q parameter: %s.", param.Name, err, ), - Subject: argExpr.StartRange().Ptr(), - Context: e.Range().Ptr(), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, }) } @@ -386,8 +404,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti "Invalid value for %q parameter: %s.", param.Name, err, ), - Subject: argExpr.StartRange().Ptr(), - Context: e.Range().Ptr(), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, }) default: @@ -398,8 +418,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti "Call to function %q failed: %s.", e.Name, err, ), - Subject: e.StartRange().Ptr(), - Context: e.Range().Ptr(), + Subject: e.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, }) } @@ -417,6 +439,21 @@ func (e *FunctionCallExpr) StartRange() hcl.Range { return hcl.RangeBetween(e.NameRange, e.OpenParenRange) } +// Implementation for hcl.ExprCall. +func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall { + ret := &hcl.StaticCall{ + Name: e.Name, + NameRange: e.NameRange, + Arguments: make([]hcl.Expression, len(e.Args)), + ArgsRange: hcl.RangeBetween(e.OpenParenRange, e.CloseParenRange), + } + // Need to convert our own Expression objects into hcl.Expression. + for i, arg := range e.Args { + ret.Arguments[i] = arg + } + return ret +} + type ConditionalExpr struct { Condition Expression TrueResult Expression @@ -426,9 +463,9 @@ type ConditionalExpr struct { } func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) { - e.Condition = w(e.Condition).(Expression) - e.TrueResult = w(e.TrueResult).(Expression) - e.FalseResult = w(e.FalseResult).(Expression) + w(e.Condition) + w(e.TrueResult) + w(e.FalseResult) } func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { @@ -449,10 +486,12 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic // "These expressions are object and object respectively" if the // object types don't exactly match. "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", - trueResult.Type(), falseResult.Type(), + trueResult.Type().FriendlyName(), falseResult.Type().FriendlyName(), ), - Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), - Context: &e.SrcRange, + Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), + Context: &e.SrcRange, + Expression: e, + EvalContext: ctx, }, } } @@ -461,11 +500,13 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic diags = append(diags, condDiags...) 
if condResult.IsNull() { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Null condition", - Detail: "The condition value is null. Conditions must either be true or false.", - Subject: e.Condition.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Null condition", + Detail: "The condition value is null. Conditions must either be true or false.", + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Condition, + EvalContext: ctx, }) return cty.UnknownVal(resultType), diags } @@ -475,11 +516,13 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic condResult, err := convert.Convert(condResult, cty.Bool) if err != nil { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect condition type", - Detail: fmt.Sprintf("The condition expression must be of type bool."), - Subject: e.Condition.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Incorrect condition type", + Detail: fmt.Sprintf("The condition expression must be of type bool."), + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Condition, + EvalContext: ctx, }) return cty.UnknownVal(resultType), diags } @@ -498,8 +541,10 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic "The true result value has the wrong type: %s.", err.Error(), ), - Subject: e.TrueResult.Range().Ptr(), - Context: &e.SrcRange, + Subject: e.TrueResult.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.TrueResult, + EvalContext: ctx, }) trueResult = cty.UnknownVal(resultType) } @@ -519,8 +564,10 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic "The false result value has the wrong type: %s.", err.Error(), ), - Subject: e.TrueResult.Range().Ptr(), - Context: &e.SrcRange, + Subject: e.FalseResult.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.FalseResult, + EvalContext: ctx, }) falseResult = cty.UnknownVal(resultType) } @@ -546,8 +593,8 @@ type IndexExpr struct { } func (e *IndexExpr) walkChildNodes(w internalWalkFunc) { - e.Collection = w(e.Collection).(Expression) - e.Key = w(e.Key).(Expression) + w(e.Collection) + w(e.Key) } func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { @@ -557,7 +604,10 @@ func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { diags = append(diags, collDiags...) diags = append(diags, keyDiags...) - return hcl.Index(coll, key, &e.SrcRange) + val, indexDiags := hcl.Index(coll, key, &e.SrcRange) + setDiagEvalContext(indexDiags, e, ctx) + diags = append(diags, indexDiags...) 
+ return val, diags } func (e *IndexExpr) Range() hcl.Range { @@ -576,8 +626,8 @@ type TupleConsExpr struct { } func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) { - for i, expr := range e.Exprs { - e.Exprs[i] = w(expr).(Expression) + for _, expr := range e.Exprs { + w(expr) } } @@ -625,9 +675,9 @@ type ObjectConsItem struct { } func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) { - for i, item := range e.Items { - e.Items[i].KeyExpr = w(item.KeyExpr).(Expression) - e.Items[i].ValueExpr = w(item.ValueExpr).(Expression) + for _, item := range e.Items { + w(item.KeyExpr) + w(item.ValueExpr) } } @@ -660,10 +710,12 @@ func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics if key.IsNull() { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Null value as key", - Detail: "Can't use a null value as a key.", - Subject: item.ValueExpr.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Null value as key", + Detail: "Can't use a null value as a key.", + Subject: item.ValueExpr.Range().Ptr(), + Expression: item.KeyExpr, + EvalContext: ctx, }) known = false continue @@ -673,10 +725,12 @@ func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics key, err = convert.Convert(key, cty.String) if err != nil { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect key type", - Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()), - Subject: item.ValueExpr.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Incorrect key type", + Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()), + Subject: item.KeyExpr.Range().Ptr(), + Expression: item.KeyExpr, + EvalContext: ctx, }) known = false continue @@ -739,11 +793,31 @@ func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) { // We only treat our wrapped expression as a real expression if we're // not going to interpret it as a literal. if e.literalName() == "" { - e.Wrapped = w(e.Wrapped).(Expression) + w(e.Wrapped) } } func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + // Because we accept a naked identifier as a literal key rather than a + // reference, it's confusing to accept a traversal containing periods + // here since we can't tell if the user intends to create a key with + // periods or actually reference something. To avoid confusing downstream + // errors we'll just prohibit a naked multi-step traversal here and + // require the user to state their intent more clearly. + // (This is handled at evaluation time rather than parse time because + // an application using static analysis _can_ accept a naked multi-step + // traversal here, if desired.) + if travExpr, isTraversal := e.Wrapped.(*ScopeTraversalExpr); isTraversal && len(travExpr.Traversal) > 1 { + var diags hcl.Diagnostics + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Ambiguous attribute key", + Detail: "If this expression is intended to be a reference, wrap it in parentheses. 
If it's instead intended as a literal name containing periods, wrap it in quotes to create a string literal.", + Subject: e.Range().Ptr(), + }) + return cty.DynamicVal, diags + } + if ln := e.literalName(); ln != "" { return cty.StringVal(ln), nil } @@ -803,11 +877,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if collVal.IsNull() { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Iteration over null value", - Detail: "A null value cannot be used as the collection in a 'for' expression.", - Subject: e.CollExpr.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Iteration over null value", + Detail: "A null value cannot be used as the collection in a 'for' expression.", + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CollExpr, + EvalContext: ctx, }) return cty.DynamicVal, diags } @@ -822,8 +898,10 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { "A value of type %s cannot be used as the collection in a 'for' expression.", collVal.Type().FriendlyName(), ), - Subject: e.CollExpr.Range().Ptr(), - Context: &e.SrcRange, + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CollExpr, + EvalContext: ctx, }) return cty.DynamicVal, diags } @@ -831,14 +909,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { return cty.DynamicVal, diags } - childCtx := ctx.NewChild() - childCtx.Variables = map[string]cty.Value{} - // Before we start we'll do an early check to see if any CondExpr we've // been given is of the wrong type. This isn't 100% reliable (it may // be DynamicVal until real values are given) but it should catch some // straightforward cases and prevent a barrage of repeated errors. if e.CondExpr != nil { + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} if e.KeyVar != "" { childCtx.Variables[e.KeyVar] = cty.DynamicVal } @@ -848,22 +925,26 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { diags = append(diags, condDiags...) 
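+ // Because the iterator variables were set to cty.DynamicVal above, any + // definite problem detected here is independent of the real element + // values, so it's safe to report it once rather than per element.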
if result.IsNull() { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Condition is null", - Detail: "The value of the 'if' clause must not be null.", - Subject: e.CondExpr.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: ctx, }) return cty.DynamicVal, diags } _, err := convert.Convert(result, cty.Bool) if err != nil { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid 'for' condition", - Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), - Subject: e.CondExpr.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: ctx, }) return cty.DynamicVal, diags } @@ -887,6 +968,8 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { known := true for it.Next() { k, v := it.Element() + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} if e.KeyVar != "" { childCtx.Variables[e.KeyVar] = k } @@ -898,11 +981,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if includeRaw.IsNull() { if known { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Condition is null", - Detail: "The value of the 'if' clause must not be null.", - Subject: e.CondExpr.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, }) } known = false @@ -912,11 +997,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if err != nil { if known { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid 'for' condition", - Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), - Subject: e.CondExpr.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, }) } known = false @@ -938,11 +1025,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if keyRaw.IsNull() { if known { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object key", - Detail: "Key expression in 'for' expression must not produce a null value.", - Subject: e.KeyExpr.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: "Key expression in 'for' expression must not produce a null value.", + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, }) } known = false @@ -957,11 +1046,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if err != nil { if known { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object key", - Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", 
err.Error()), - Subject: e.KeyExpr.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, }) } known = false @@ -981,11 +1072,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { Severity: hcl.DiagError, Summary: "Duplicate object key", Detail: fmt.Sprintf( - "Two different items produced the key %q in this for expression. If duplicates are expected, use the ellipsis (...) after the value expression to enable grouping by key.", + "Two different items produced the key %q in this 'for' expression. If duplicates are expected, use the ellipsis (...) after the value expression to enable grouping by key.", k, ), - Subject: e.KeyExpr.Range().Ptr(), - Context: &e.SrcRange, + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, }) } else { vals[key.AsString()] = val @@ -1015,6 +1108,8 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { known := true for it.Next() { k, v := it.Element() + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} if e.KeyVar != "" { childCtx.Variables[e.KeyVar] = k } @@ -1026,11 +1121,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if includeRaw.IsNull() { if known { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Condition is null", - Detail: "The value of the 'if' clause must not be null.", - Subject: e.CondExpr.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, }) } known = false @@ -1048,11 +1145,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if err != nil { if known { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid 'for' condition", - Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), - Subject: e.CondExpr.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, }) } known = false @@ -1079,7 +1178,7 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { } func (e *ForExpr) walkChildNodes(w internalWalkFunc) { - e.CollExpr = w(e.CollExpr).(Expression) + w(e.CollExpr) scopeNames := map[string]struct{}{} if e.KeyVar != "" { @@ -1092,17 +1191,17 @@ func (e *ForExpr) walkChildNodes(w internalWalkFunc) { if e.KeyExpr != nil { w(ChildScope{ LocalNames: scopeNames, - Expr: &e.KeyExpr, + Expr: e.KeyExpr, }) } w(ChildScope{ LocalNames: scopeNames, - Expr: &e.ValExpr, + Expr: e.ValExpr, }) if e.CondExpr != nil { w(ChildScope{ LocalNames: scopeNames, - Expr: &e.CondExpr, + Expr: e.CondExpr, }) } } @@ -1136,26 +1235,78 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { return cty.DynamicVal, diags } + sourceTy := sourceVal.Type() + if sourceTy == cty.DynamicPseudoType { + // If we don't even know the _type_ of our source value yet then + // 
we'll need to defer all processing, since we can't decide our + // result type either. + return cty.DynamicVal, diags + } + + // A "special power" of splat expressions is that they can be applied + // both to tuples/lists and to other values, and in the latter case + // the value will be treated as an implicit single-item tuple, or as + // an empty tuple if the value is null. + autoUpgrade := !(sourceTy.IsTupleType() || sourceTy.IsListType() || sourceTy.IsSetType()) + if sourceVal.IsNull() { + if autoUpgrade { + return cty.EmptyTupleVal, diags + } diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Splat of null value", - Detail: "Splat expressions (with the * symbol) cannot be applied to null values.", - Subject: e.Source.Range().Ptr(), - Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(), + Severity: hcl.DiagError, + Summary: "Splat of null value", + Detail: "Splat expressions (with the * symbol) cannot be applied to null sequences.", + Subject: e.Source.Range().Ptr(), + Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(), + Expression: e.Source, + EvalContext: ctx, }) return cty.DynamicVal, diags } - if !sourceVal.IsKnown() { - return cty.DynamicVal, diags + + if autoUpgrade { + sourceVal = cty.TupleVal([]cty.Value{sourceVal}) + sourceTy = sourceVal.Type() } - // A "special power" of splat expressions is that they can be applied - // both to tuples/lists and to other values, and in the latter case - // the value will be treated as an implicit single-value list. We'll - // deal with that here first. - if !(sourceVal.Type().IsTupleType() || sourceVal.Type().IsListType()) { - sourceVal = cty.ListVal([]cty.Value{sourceVal}) + // We'll compute our result type lazily if we need it. In the normal case + // it's inferred automatically from the value we construct. + resultTy := func() (cty.Type, hcl.Diagnostics) { + chiCtx := ctx.NewChild() + var diags hcl.Diagnostics + switch { + case sourceTy.IsListType() || sourceTy.IsSetType(): + ety := sourceTy.ElementType() + e.Item.setValue(chiCtx, cty.UnknownVal(ety)) + val, itemDiags := e.Each.Value(chiCtx) + diags = append(diags, itemDiags...) + e.Item.clearValue(chiCtx) // clean up our temporary value + return cty.List(val.Type()), diags + case sourceTy.IsTupleType(): + etys := sourceTy.TupleElementTypes() + resultTys := make([]cty.Type, 0, len(etys)) + for _, ety := range etys { + e.Item.setValue(chiCtx, cty.UnknownVal(ety)) + val, itemDiags := e.Each.Value(chiCtx) + diags = append(diags, itemDiags...) + e.Item.clearValue(chiCtx) // clean up our temporary value + resultTys = append(resultTys, val.Type()) + } + return cty.Tuple(resultTys), diags + default: + // Should never happen because of our promotion to list above. + return cty.DynamicPseudoType, diags + } + } + + if !sourceVal.IsKnown() { + // We can't produce a known result in this case, but we'll still + // indicate what the result type would be, allowing any downstream type + // checking to proceed. + ty, tyDiags := resultTy() + diags = append(diags, tyDiags...) + return cty.UnknownVal(ty), diags } vals := make([]cty.Value, 0, sourceVal.LengthInt()) @@ -1179,15 +1330,28 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { e.Item.clearValue(ctx) // clean up our temporary value if !isKnown { - return cty.DynamicVal, diags + // We'll ignore the resultTy diagnostics in this case since they + // will just be the same errors we saw while iterating above.
+ ty, _ := resultTy() + return cty.UnknownVal(ty), diags } - return cty.TupleVal(vals), diags + switch { + case sourceTy.IsListType() || sourceTy.IsSetType(): + if len(vals) == 0 { + ty, tyDiags := resultTy() + diags = append(diags, tyDiags...) + return cty.ListValEmpty(ty.ElementType()), diags + } + return cty.ListVal(vals), diags + default: + return cty.TupleVal(vals), diags + } } func (e *SplatExpr) walkChildNodes(w internalWalkFunc) { - e.Source = w(e.Source).(Expression) - e.Each = w(e.Each).(Expression) + w(e.Source) + w(e.Each) } func (e *SplatExpr) Range() hcl.Range { @@ -1211,13 +1375,24 @@ func (e *SplatExpr) StartRange() hcl.Range { // assigns it a value. type AnonSymbolExpr struct { SrcRange hcl.Range - values map[*hcl.EvalContext]cty.Value + + // values and its associated lock are used to isolate concurrent + // evaluations of a symbol from one another. It is the calling application's + // responsibility to ensure that the same splat expression is not evaluated + // concurrently within the _same_ EvalContext, but it is fine and safe to + // do concurrent evaluations with distinct EvalContexts. + values map[*hcl.EvalContext]cty.Value + valuesLock sync.RWMutex } func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if ctx == nil { return cty.DynamicVal, nil } + + e.valuesLock.RLock() + defer e.valuesLock.RUnlock() + val, exists := e.values[ctx] if !exists { return cty.DynamicVal, nil } @@ -1228,6 +1403,9 @@ func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics // setValue sets a temporary local value for the expression when evaluated // in the given context, which must be non-nil. func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) { + e.valuesLock.Lock() + defer e.valuesLock.Unlock() + if e.values == nil { e.values = make(map[*hcl.EvalContext]cty.Value) } @@ -1238,6 +1416,9 @@ func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) { } func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) { + e.valuesLock.Lock() + defer e.valuesLock.Unlock() + if e.values == nil { return } diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go index 9a5da043be..7f59f1a275 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go @@ -129,8 +129,8 @@ type BinaryOpExpr struct { } func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) { - e.LHS = w(e.LHS).(Expression) - e.RHS = w(e.RHS).(Expression) + w(e.LHS) + w(e.RHS) } func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { @@ -149,21 +149,25 @@ func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type) if err != nil { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid operand", - Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err), - Subject: e.LHS.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err), + Subject: e.LHS.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.LHS, + EvalContext: ctx, }) } rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type) if err != nil { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid operand", - Detail: fmt.Sprintf("Unsuitable value 
for right operand: %s.", err), - Subject: e.RHS.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err), + Subject: e.RHS.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.RHS, + EvalContext: ctx, }) } @@ -178,10 +182,12 @@ func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) if err != nil { diags = append(diags, &hcl.Diagnostic{ // FIXME: This diagnostic is useless. - Severity: hcl.DiagError, - Summary: "Operation failed", - Detail: fmt.Sprintf("Error during operation: %s.", err), - Subject: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + Expression: e, + EvalContext: ctx, }) return cty.UnknownVal(e.Op.Type), diags } @@ -206,7 +212,7 @@ type UnaryOpExpr struct { } func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) { - e.Val = w(e.Val).(Expression) + w(e.Val) } func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { @@ -219,11 +225,13 @@ func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { val, err := convert.Convert(givenVal, param.Type) if err != nil { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid operand", - Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err), - Subject: e.Val.Range().Ptr(), - Context: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err), + Subject: e.Val.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Val, + EvalContext: ctx, }) } @@ -238,10 +246,12 @@ func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if err != nil { diags = append(diags, &hcl.Diagnostic{ // FIXME: This diagnostic is useless. - Severity: hcl.DiagError, - Summary: "Operation failed", - Detail: fmt.Sprintf("Error during operation: %s.", err), - Subject: &e.SrcRange, + Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + Expression: e, + EvalContext: ctx, }) return cty.UnknownVal(e.Op.Type), diags } diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go index a1c472754d..fa79e3d08f 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go @@ -16,8 +16,8 @@ type TemplateExpr struct { } func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) { - for i, part := range e.Parts { - e.Parts[i] = w(part).(Expression) + for _, part := range e.Parts { + w(part) } } @@ -37,8 +37,10 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) Detail: fmt.Sprintf( "The expression result is null. 
Cannot include a null value in a string template.", ), - Subject: part.Range().Ptr(), - Context: &e.SrcRange, + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + Expression: part, + EvalContext: ctx, }) continue } @@ -61,8 +63,10 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) "Cannot include the given value in a string template: %s.", err.Error(), ), - Subject: part.Range().Ptr(), - Context: &e.SrcRange, + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + Expression: part, + EvalContext: ctx, }) continue } @@ -94,7 +98,7 @@ type TemplateJoinExpr struct { } func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) { - e.Tuple = w(e.Tuple).(Expression) + w(e.Tuple) } func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { @@ -127,7 +131,9 @@ func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Detail: fmt.Sprintf( "An iteration result is null. Cannot include a null value in a string template.", ), - Subject: e.Range().Ptr(), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, }) continue } @@ -143,7 +149,9 @@ func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti "Cannot include one of the interpolation results into the string template: %s.", err.Error(), ), - Subject: e.Range().Ptr(), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, }) continue } @@ -176,7 +184,7 @@ type TemplateWrapExpr struct { } func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) { - e.Wrapped = w(e.Wrapped).(Expression) + w(e.Wrapped) } func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go index 4d41b6b662..c8c97f37cd 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go @@ -3,6 +3,8 @@ package hclsyntax import ( "bytes" "fmt" + + "github.com/hashicorp/hcl2/hcl" ) type navigation struct { @@ -39,3 +41,19 @@ func (n navigation) ContextString(offset int) string { } return buf.String() } + +func (n navigation) ContextDefRange(offset int) hcl.Range { + var block *Block + for _, candidate := range n.root.Blocks { + if candidate.Range().ContainsOffset(offset) { + block = candidate + break + } + } + + if block == nil { + return hcl.Range{} + } + + return block.DefRange() +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go index fd426d4a6c..75812e63dd 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go @@ -19,4 +19,4 @@ type Node interface { Range() hcl.Range } -type internalWalkFunc func(Node) Node +type internalWalkFunc func(Node) diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go index c35f94439a..551360298f 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go @@ -9,7 +9,6 @@ import ( "github.com/apparentlymart/go-textseg/textseg" "github.com/hashicorp/hcl2/hcl" "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" ) type parser struct { @@ -55,7 +54,7 @@ Token: Severity: hcl.DiagError, Summary: "Attribute redefined", Detail: fmt.Sprintf( - "The attribute %q was already defined 
at %s. Each attribute may be defined only once.", + "The argument %q was already set at %s. Each argument may be set only once.", titem.Name, existing.NameRange.String(), ), Subject: &titem.NameRange, @@ -80,15 +79,15 @@ Token: if bad.Type == TokenOQuote { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: "Invalid attribute name", - Detail: "Attribute names must not be quoted.", + Summary: "Invalid argument name", + Detail: "Argument names must not be quoted.", Subject: &bad.Range, }) } else { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: "Attribute or block definition required", - Detail: "An attribute or block definition is required here.", + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", Subject: &bad.Range, }) } @@ -120,8 +119,8 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { return nil, hcl.Diagnostics{ { Severity: hcl.DiagError, - Summary: "Attribute or block definition required", - Detail: "An attribute or block definition is required here.", + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", Subject: &ident.Range, }, } @@ -131,16 +130,16 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { switch next.Type { case TokenEqual: - return p.finishParsingBodyAttribute(ident) - case TokenOQuote, TokenOBrace: + return p.finishParsingBodyAttribute(ident, false) + case TokenOQuote, TokenOBrace, TokenIdent: return p.finishParsingBodyBlock(ident) default: p.recoverAfterBodyItem() return nil, hcl.Diagnostics{ { Severity: hcl.DiagError, - Summary: "Attribute or block definition required", - Detail: "An attribute or block definition is required here. To define an attribute, use the equals sign \"=\" to introduce the attribute value.", + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.", Subject: &ident.Range, }, } @@ -149,7 +148,72 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { return nil, nil } -func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics) { +// parseSingleAttrBody is a weird variant of ParseBody that deals with the +// body of a nested block containing only one attribute value all on a single +// line, like foo { bar = baz } . It expects to find a single attribute item +// immediately followed by the end token type with no intervening newlines. +func (p *parser) parseSingleAttrBody(end TokenType) (*Body, hcl.Diagnostics) { + ident := p.Read() + if ident.Type != TokenIdent { + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", + Subject: &ident.Range, + }, + } + } + + var attr *Attribute + var diags hcl.Diagnostics + + next := p.Peek() + + switch next.Type { + case TokenEqual: + node, attrDiags := p.finishParsingBodyAttribute(ident, true) + diags = append(diags, attrDiags...) + attr = node.(*Attribute) + case TokenOQuote, TokenOBrace, TokenIdent: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument definition required", + Detail: fmt.Sprintf("A single-line block definition can contain only a single argument. If you meant to define argument %q, use an equals sign to assign it a value. 
To define a nested block, place it on a line of its own within its parent block.", ident.Bytes), + Subject: hcl.RangeBetween(ident.Range, next.Range).Ptr(), + }, + } + default: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.", + Subject: &ident.Range, + }, + } + } + + return &Body{ + Attributes: Attributes{ + string(ident.Bytes): attr, + }, + + SrcRange: attr.SrcRange, + EndRange: hcl.Range{ + Filename: attr.SrcRange.Filename, + Start: attr.SrcRange.End, + End: attr.SrcRange.End, + }, + }, diags + +} + +func (p *parser) finishParsingBodyAttribute(ident Token, singleLine bool) (Node, hcl.Diagnostics) { eqTok := p.Read() // eat equals token if eqTok.Type != TokenEqual { // should never happen if caller behaves @@ -166,32 +230,33 @@ func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics) endRange = p.PrevRange() p.recoverAfterBodyItem() } else { - end := p.Peek() - if end.Type != TokenNewline { - if !p.recovery { - if end.Type == TokenEOF { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing newline after attribute definition", - Detail: "A newline is required after an attribute definition at the end of a file.", - Subject: &end.Range, - Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(), - }) - } else { + endRange = p.PrevRange() + if !singleLine { + end := p.Peek() + if end.Type != TokenNewline && end.Type != TokenEOF { + if !p.recovery { + summary := "Missing newline after argument" + detail := "An argument definition must end with a newline." + + if end.Type == TokenComma { + summary = "Unexpected comma after argument" + detail = "Argument definitions must be separated by newlines, not commas. " + detail + } + diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: "Missing newline after attribute definition", - Detail: "An attribute definition must end with a newline.", + Summary: summary, + Detail: detail, Subject: &end.Range, Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(), }) } + endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + endRange = p.PrevRange() + p.Read() // eat newline } - endRange = p.PrevRange() - p.recoverAfterBodyItem() - } else { - endRange = p.PrevRange() - p.Read() // eat newline } } @@ -228,19 +293,15 @@ Token: diags = append(diags, labelDiags...) labels = append(labels, label) labelRanges = append(labelRanges, labelRange) - if labelDiags.HasErrors() { - p.recoverAfterBodyItem() - return &Block{ - Type: blockType, - Labels: labels, - Body: nil, - - TypeRange: ident.Range, - LabelRanges: labelRanges, - OpenBraceRange: ident.Range, // placeholder - CloseBraceRange: ident.Range, // placeholder - }, diags - } + // parseQuoteStringLiteral recovers up to the closing quote + // if it encounters problems, so we can continue looking for + // more labels and eventually the block body even. 
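The parseSingleAttrBody helper above enables the one-line block form foo { bar = baz }. A rough sketch of the effect, using hypothetical block and argument names (settings, retries, timeout) that are not part of this patch; the comma case is rejected by the dispatch logic that follows below:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// One argument on the block's opening line parses via the new
	// single-attribute body path.
	ok := []byte("settings { retries = 3 }\n")
	_, diags := hclsyntax.ParseConfig(ok, "ok.hcl", hcl.InitialPos)
	fmt.Println(diags.HasErrors()) // false

	// Comma-separated arguments in that position are rejected with
	// "Invalid single-argument block definition".
	bad := []byte("settings { retries = 3, timeout = 5 }\n")
	_, diags = hclsyntax.ParseConfig(bad, "bad.hcl", hcl.InitialPos)
	fmt.Println(diags.HasErrors()) // true
}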
+ + case TokenIdent: + tok = p.Read() // eat token + label, labelRange := string(tok.Bytes), tok.Range + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) default: switch tok.Type { @@ -248,7 +309,7 @@ Token: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid block definition", - Detail: "The equals sign \"=\" indicates an attribute definition, and must not be used when defining a block.", + Detail: "The equals sign \"=\" indicates an argument definition, and must not be used when defining a block.", Subject: &tok.Range, Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), }) @@ -277,7 +338,10 @@ Token: return &Block{ Type: blockType, Labels: labels, - Body: nil, + Body: &Body{ + SrcRange: ident.Range, + EndRange: ident.Range, + }, TypeRange: ident.Range, LabelRanges: labelRanges, @@ -289,32 +353,66 @@ Token: // Once we fall out here, the peeker is pointed just after our opening // brace, so we can begin our nested body parsing. - body, bodyDiags := p.ParseBody(TokenCBrace) + var body *Body + var bodyDiags hcl.Diagnostics + switch p.Peek().Type { + case TokenNewline, TokenEOF, TokenCBrace: + body, bodyDiags = p.ParseBody(TokenCBrace) + default: + // Special one-line, single-attribute block parsing mode. + body, bodyDiags = p.parseSingleAttrBody(TokenCBrace) + switch p.Peek().Type { + case TokenCBrace: + p.Read() // the happy path - just consume the closing brace + case TokenComma: + // User seems to be trying to use the object-constructor + // comma-separated style, which isn't permitted for blocks. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "Single-line block syntax can include only one argument definition. To define multiple arguments, use the multi-line block syntax with one argument definition per line.", + Subject: p.Peek().Range.Ptr(), + }) + p.recover(TokenCBrace) + case TokenNewline: + // We don't allow weird mixtures of single and multi-line syntax. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "An argument definition on the same line as its containing block creates a single-line block definition, which must also be closed on the same line. Place the block's closing brace immediately after the argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + p.recover(TokenCBrace) + default: + // Some other weird thing is going on. Since we can't guess a likely + // user intent for this one, we'll skip it if we're already in + // recovery mode. + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenCBrace) + } + } diags = append(diags, bodyDiags...) 
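The new TokenIdent case above also allows block labels to be written as bare identifiers rather than quoted strings. A small sketch under the same caveats (service, web, and port are illustrative names):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// The label web is a bare identifier; the TokenIdent case records
	// it just as if it had been written as the quoted string "web".
	src := []byte("service web {\n  port = 8080\n}\n")
	f, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	body := f.Body.(*hclsyntax.Body)
	fmt.Println(body.Blocks[0].Type, body.Blocks[0].Labels) // service [web]
}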
cBraceRange := p.PrevRange() eol := p.Peek() - if eol.Type == TokenNewline { + if eol.Type == TokenNewline || eol.Type == TokenEOF { p.Read() // eat newline } else { if !p.recovery { - if eol.Type == TokenEOF { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing newline after block definition", - Detail: "A newline is required after a block definition at the end of a file.", - Subject: &eol.Range, - Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(), - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing newline after block definition", - Detail: "A block definition must end with a newline.", - Subject: &eol.Range, - Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(), - }) - } + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after block definition", + Detail: "A block definition must end with a newline.", + Subject: &eol.Range, + Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(), + }) } p.recoverAfterBodyItem() } @@ -473,7 +571,14 @@ func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) { term, diags := p.parseExpressionTerm() - ret := term + ret, moreDiags := p.parseExpressionTraversals(term) + diags = append(diags, moreDiags...) + return ret, diags +} + +func (p *parser) parseExpressionTraversals(from Expression) (Expression, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := from Traversal: for { @@ -497,6 +602,53 @@ Traversal: ret = makeRelativeTraversal(ret, step, rng) + case TokenNumberLit: + // This is a weird form we inherited from HIL, allowing numbers + // to be used as attributes as a weird way of writing [n]. + // This was never actually a first-class thing in HIL, but + // HIL tolerated sequences like .0. in its variable names and + // calling applications like Terraform exploited that to + // introduce indexing syntax where none existed. + numTok := p.Read() // eat token + attrTok = numTok + + // This syntax is ambiguous if multiple indices are used in + // succession, like foo.0.1.baz: that actually parses as + // a fractional number 0.1. Since we're only supporting this + // syntax for compatibility with legacy Terraform + // configurations, and Terraform does not tend to have lists + // of lists, we'll choose to reject that here with a helpful + // error message, rather than failing later because the index + // isn't a whole number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + break + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) + + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: numVal, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + case TokenStar: // "Attribute-only" splat expression. 
// (This is a kinda weird construct inherited from HIL, which @@ -517,6 +669,27 @@ Traversal: // into a list, for expressions like: // foo.bar.*.baz.0.foo numTok := p.Read() + + // Weird special case if the user writes something + // like foo.bar.*.baz.0.0.foo, where 0.0 parses + // as a number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax with a full splat expression [*] instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + trav = append(trav, hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + numVal, numDiags := p.numberLitValue(numTok) diags = append(diags, numDiags...) trav = append(trav, hcl.TraverseIndex{ @@ -603,44 +776,81 @@ Traversal: // the key value is something constant. open := p.Read() - // TODO: If we have a TokenStar inside our brackets, parse as - // a Splat expression: foo[*].baz[0]. - var close Token - p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets - keyExpr, keyDiags := p.ParseExpression() - diags = append(diags, keyDiags...) - if p.recovery && keyDiags.HasErrors() { - close = p.recover(TokenCBrack) - } else { - close = p.Read() + switch p.Peek().Type { + case TokenStar: + // This is a full splat expression, like foo[*], which consumes + // the rest of the traversal steps after it using a recursive + // call to this function. + p.Read() // consume star + close := p.Read() if close.Type != TokenCBrack && !p.recovery { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: "Missing close bracket on index", - Detail: "The index operator must end with a closing bracket (\"]\").", + Summary: "Missing close bracket on splat index", + Detail: "The star for a full splat operator must be immediately followed by a closing bracket (\"]\").", Subject: &close.Range, }) close = p.recover(TokenCBrack) } - } - p.PopIncludeNewlines() + // Splat expressions use a special "anonymous symbol" as a + // placeholder in an expression to be evaluated once for each + // item in the source expression. + itemExpr := &AnonSymbolExpr{ + SrcRange: hcl.RangeBetween(open.Range, close.Range), + } + // Now we'll recursively call this same function to eat any + // remaining traversal steps against the anonymous symbol. + travExpr, nestedDiags := p.parseExpressionTraversals(itemExpr) + diags = append(diags, nestedDiags...) - if lit, isLit := keyExpr.(*LiteralValueExpr); isLit { - litKey, _ := lit.Value(nil) - rng := hcl.RangeBetween(open.Range, close.Range) - step := hcl.TraverseIndex{ - Key: litKey, - SrcRange: rng, + ret = &SplatExpr{ + Source: ret, + Each: travExpr, + Item: itemExpr, + + SrcRange: hcl.RangeBetween(open.Range, travExpr.Range()), + MarkerRange: hcl.RangeBetween(open.Range, close.Range), } - ret = makeRelativeTraversal(ret, step, rng) - } else { - rng := hcl.RangeBetween(open.Range, close.Range) - ret = &IndexExpr{ - Collection: ret, - Key: keyExpr, - SrcRange: rng, - OpenRange: open.Range, + default: + + var close Token + p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets + keyExpr, keyDiags := p.ParseExpression() + diags = append(diags, keyDiags...) 
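The two traversal forms handled above can be exercised as follows. This is a sketch against the vendored API, with foo and bar as illustrative names, showing that the legacy foo.0.bar form is treated as an index equivalent to foo[0].bar, and that a full splat consumes the traversal steps written after it:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"foo": cty.TupleVal([]cty.Value{
				cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("x")}),
			}),
		},
	}

	// Legacy HIL-style index: foo.0.bar parses as an index traversal.
	legacy, _ := hclsyntax.ParseExpression([]byte("foo.0.bar"), "legacy.hcl", hcl.InitialPos)
	v, _ := legacy.Value(ctx)
	fmt.Println(v.AsString()) // x

	// Full splat: [*] applies the remaining steps once per element.
	splat, _ := hclsyntax.ParseExpression([]byte("foo[*].bar"), "splat.hcl", hcl.InitialPos)
	v, _ = splat.Value(ctx)
	fmt.Println(v.GoString()) // a one-element tuple containing "x"
}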
+ if p.recovery && keyDiags.HasErrors() { + close = p.recover(TokenCBrack) + } else { + close = p.Read() + if close.Type != TokenCBrack && !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing close bracket on index", + Detail: "The index operator must end with a closing bracket (\"]\").", + Subject: &close.Range, + }) + close = p.recover(TokenCBrack) + } + } + p.PopIncludeNewlines() + + if lit, isLit := keyExpr.(*LiteralValueExpr); isLit { + litKey, _ := lit.Value(nil) + rng := hcl.RangeBetween(open.Range, close.Range) + step := hcl.TraverseIndex{ + Key: litKey, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + } else { + rng := hcl.RangeBetween(open.Range, close.Range) + ret = &IndexExpr{ + Collection: ret, + Key: keyExpr, + + SrcRange: rng, + OpenRange: open.Range, + } } } @@ -759,7 +969,7 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { case TokenOQuote, TokenOHeredoc: open := p.Read() // eat opening marker closer := p.oppositeBracket(open.Type) - exprs, passthru, _, diags := p.parseTemplateInner(closer) + exprs, passthru, _, diags := p.parseTemplateInner(closer, tokenOpensFlushHeredoc(open)) closeRange := p.PrevRange() @@ -837,11 +1047,10 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { } func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) { - // We'll lean on the cty converter to do the conversion, to ensure that - // the behavior is the same as what would happen if converting a - // non-literal string to a number. - numStrVal := cty.StringVal(string(tok.Bytes)) - numVal, err := convert.Convert(numStrVal, cty.Number) + // Using cty.ParseNumberVal gives the same behavior as converting a + // string to a number, ensuring we always interpret decimal numbers in + // the same way. + numVal, err := cty.ParseNumberVal(string(tok.Bytes)) if err != nil { ret := cty.UnknownVal(cty.Number) return ret, hcl.Diagnostics{ @@ -1033,13 +1242,19 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { panic("parseObjectCons called without peeker pointing to open brace") } - p.PushIncludeNewlines(true) - defer p.PopIncludeNewlines() - - if forKeyword.TokenMatches(p.Peek()) { + // We must temporarily stop looking at newlines here while we check for + // a "for" keyword, since for expressions are _not_ newline-sensitive, + // even though object constructors are. 
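The numberLitValue change above swaps the convert.Convert round-trip for a direct cty.ParseNumberVal call. Since cty's string-to-number conversion uses the same parser, a number literal and a converted string yield identical values; a brief sketch:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Parsing the literal directly...
	direct, err := cty.ParseNumberVal("0.1")
	if err != nil {
		panic(err)
	}

	// ...produces the same value as converting a string, so number
	// literals and converted strings remain interchangeable.
	converted, err := convert.Convert(cty.StringVal("0.1"), cty.Number)
	if err != nil {
		panic(err)
	}

	fmt.Println(direct.RawEquals(converted)) // true
}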
+ p.PushIncludeNewlines(false) + isFor := forKeyword.TokenMatches(p.Peek()) + p.PopIncludeNewlines() + if isFor { return p.finishParsingForExpr(open) } + p.PushIncludeNewlines(true) + defer p.PopIncludeNewlines() + var close Token var diags hcl.Diagnostics @@ -1078,19 +1293,36 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { next = p.Peek() if next.Type != TokenEqual && next.Type != TokenColon { if !p.recovery { - if next.Type == TokenNewline || next.Type == TokenComma { + switch next.Type { + case TokenNewline, TokenComma: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: "Missing item value", - Detail: "Expected an item value, introduced by an equals sign (\"=\").", + Summary: "Missing attribute value", + Detail: "Expected an attribute value, introduced by an equals sign (\"=\").", Subject: &next.Range, Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), }) - } else { + case TokenIdent: + // Although this might just be a plain old missing equals + // sign before a reference, one way to get here is to try + // to write an attribute name containing a period followed + // by a digit, which was valid in HCL1, like this: + // foo1.2_bar = "baz" + // We can't know exactly what the user intended here, but + // we'll augment our message with an extra hint in this case + // in case it is helpful. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing key/value separator", + Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value. If you intended to give an attribute name containing periods or spaces, write the name in quotes to create a string literal.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + default: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Missing key/value separator", - Detail: "Expected an equals sign (\"=\") to mark the beginning of the item value.", + Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value.", Subject: &next.Range, Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), }) @@ -1128,8 +1360,8 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { if !p.recovery { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: "Missing item separator", - Detail: "Expected a newline or comma to mark the beginning of the next item.", + Summary: "Missing attribute separator", + Detail: "Expected a newline or comma to mark the beginning of the next attribute.", Subject: &next.Range, Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), }) @@ -1151,6 +1383,8 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { } func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) { + p.PushIncludeNewlines(false) + defer p.PopIncludeNewlines() introducer := p.Read() if !forKeyword.TokenMatches(introducer) { // Should never happen if callers are behaving @@ -1223,7 +1457,7 @@ func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid 'for' expression", - Detail: "For expression requires 'in' keyword after names.", + Detail: "For expression requires the 'in' keyword after its name declarations.", Subject: p.Peek().Range.Ptr(), Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), }) @@ -1251,7 +1485,7 @@ func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) diags = 
append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid 'for' expression", - Detail: "For expression requires colon after collection expression.", + Detail: "For expression requires a colon after the collection expression.", Subject: p.Peek().Range.Ptr(), Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), }) @@ -1405,7 +1639,7 @@ Token: case TokenTemplateControl, TokenTemplateInterp: which := "$" if tok.Type == TokenTemplateControl { - which = "!" + which = "%" } diags = append(diags, &hcl.Diagnostic{ @@ -1418,7 +1652,16 @@ Token: Subject: &tok.Range, Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), }) - p.recover(TokenTemplateSeqEnd) + + // Now that we're returning an error callers won't attempt to use + // the result for any real operations, but they might try to use + // the partial AST for other analyses, so we'll leave a marker + // to indicate that there was something invalid in the string to + // help avoid misinterpretation of the partial result + ret.WriteString(which) + ret.WriteString("{ ... }") + + p.recover(TokenTemplateSeqEnd) // we'll try to keep parsing after the sequence ends case TokenEOF: diags = append(diags, &hcl.Diagnostic{ @@ -1439,7 +1682,7 @@ Token: Subject: &tok.Range, Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), }) - p.recover(TokenOQuote) + p.recover(TokenCQuote) break Token } diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go index 3711067eeb..e158bd81c6 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go @@ -2,6 +2,7 @@ package hclsyntax import ( "fmt" + "github.com/apparentlymart/go-textseg/textseg" "strings" "unicode" @@ -10,11 +11,11 @@ import ( ) func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) { - return p.parseTemplate(TokenEOF) + return p.parseTemplate(TokenEOF, false) } -func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) { - exprs, passthru, rng, diags := p.parseTemplateInner(end) +func (p *parser) parseTemplate(end TokenType, flushHeredoc bool) (Expression, hcl.Diagnostics) { + exprs, passthru, rng, diags := p.parseTemplateInner(end, flushHeredoc) if passthru { if len(exprs) != 1 { @@ -32,8 +33,11 @@ func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) { }, diags } -func (p *parser) parseTemplateInner(end TokenType) ([]Expression, bool, hcl.Range, hcl.Diagnostics) { +func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Expression, bool, hcl.Range, hcl.Diagnostics) { parts, diags := p.parseTemplateParts(end) + if flushHeredoc { + flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec + } tp := templateParser{ Tokens: parts.Tokens, SrcRange: parts.SrcRange, @@ -649,6 +653,73 @@ Token: return ret, diags } +// flushHeredocTemplateParts modifies in-place the line-leading literal strings +// to apply the flush heredoc processing rule: find the line with the smallest +// number of whitespace characters as prefix and then trim that number of +// characters from all of the lines. +// +// This rule is applied to static tokens rather than to the rendered result, +// so interpolating a string with leading whitespace cannot affect the chosen +// prefix length. 
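A short sketch of the flush heredoc rule described in the comment above, parsing a <<- heredoc whose content lines carry four and six leading spaces (doc is an illustrative attribute name, not from this patch):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// The content lines carry 4 and 6 leading spaces; the smallest count
	// (4) is trimmed from every line, preserving relative indentation.
	src := []byte("doc = <<-EOT\n    hello\n      world\n    EOT\n")
	f, diags := hclsyntax.ParseConfig(src, "heredoc.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	attrs, _ := f.Body.JustAttributes()
	v, _ := attrs["doc"].Expr.Value(nil)
	fmt.Printf("%q\n", v.AsString()) // "hello\n  world\n"
}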
+func flushHeredocTemplateParts(parts *templateParts) { + if len(parts.Tokens) == 0 { + // Nothing to do + return + } + + const maxInt = int((^uint(0)) >> 1) + + minSpaces := maxInt + newline := true + var adjust []*templateLiteralToken + for _, ttok := range parts.Tokens { + if newline { + newline = false + var spaces int + if lit, ok := ttok.(*templateLiteralToken); ok { + orig := lit.Val + trimmed := strings.TrimLeftFunc(orig, unicode.IsSpace) + // If a token is entirely spaces and ends with a newline + // then it's a "blank line" and thus not considered for + // space-prefix-counting purposes. + if len(trimmed) == 0 && strings.HasSuffix(orig, "\n") { + spaces = maxInt + } else { + spaceBytes := len(lit.Val) - len(trimmed) + spaces, _ = textseg.TokenCount([]byte(orig[:spaceBytes]), textseg.ScanGraphemeClusters) + adjust = append(adjust, lit) + } + } else if _, ok := ttok.(*templateEndToken); ok { + break // don't process the end token since it never has spaces before it + } + if spaces < minSpaces { + minSpaces = spaces + } + } + if lit, ok := ttok.(*templateLiteralToken); ok { + if strings.HasSuffix(lit.Val, "\n") { + newline = true // The following token, if any, begins a new line + } + } + } + + for _, lit := range adjust { + // Since we want to count space _characters_ rather than space _bytes_, + // we can't just do a straightforward slice operation here and instead + // need to hunt for the split point with a scanner. + valBytes := []byte(lit.Val) + spaceByteCount := 0 + for i := 0; i < minSpaces; i++ { + adv, _, _ := textseg.ScanGraphemeClusters(valBytes, true) + spaceByteCount += adv + valBytes = valBytes[adv:] + } + lit.Val = lit.Val[spaceByteCount:] + lit.SrcRange.Start.Column += minSpaces + lit.SrcRange.Start.Byte += spaceByteCount + } +} + type templateParts struct { Tokens []templateToken SrcRange hcl.Range diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go index de1f524caf..2895ade758 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go @@ -1,10 +1,10 @@ -// line 1 "scan_string_lit.rl" +//line scan_string_lit.rl:1 package hclsyntax // This file is generated from scan_string_lit.rl. DO NOT EDIT. -// line 9 "scan_string_lit.go" +//line scan_string_lit.go:9 var _hclstrtok_actions []byte = []byte{ 0, 1, 0, 1, 1, 2, 1, 0, } @@ -114,12 +114,12 @@ const hclstrtok_error int = 0 const hclstrtok_en_quoted int = 10 const hclstrtok_en_unquoted int = 4 -// line 10 "scan_string_lit.rl" +//line scan_string_lit.rl:10 func scanStringLit(data []byte, quoted bool) [][]byte { var ret [][]byte - // line 61 "scan_string_lit.rl" +//line scan_string_lit.rl:61 // Ragel state p := 0 // "Pointer" into data @@ -144,11 +144,11 @@ func scanStringLit(data []byte, quoted bool) [][]byte { ret = append(ret, data[ts:te]) }*/ - // line 154 "scan_string_lit.go" +//line scan_string_lit.go:154 { } - // line 158 "scan_string_lit.go" +//line scan_string_lit.go:158 { var _klen int var _trans int @@ -229,7 +229,7 @@ func scanStringLit(data []byte, quoted bool) [][]byte { _acts++ switch _hclstrtok_actions[_acts-1] { case 0: - // line 40 "scan_string_lit.rl" +//line scan_string_lit.rl:40 // If te is behind p then we've skipped over some literal // characters which we must now return. 
@@ -239,12 +239,12 @@ func scanStringLit(data []byte, quoted bool) [][]byte { ts = p case 1: - // line 48 "scan_string_lit.rl" +//line scan_string_lit.rl:48 te = p ret = append(ret, data[ts:te]) - // line 255 "scan_string_lit.go" +//line scan_string_lit.go:253 } } @@ -267,12 +267,12 @@ func scanStringLit(data []byte, quoted bool) [][]byte { __acts++ switch _hclstrtok_actions[__acts-1] { case 1: - // line 48 "scan_string_lit.rl" +//line scan_string_lit.rl:48 te = p ret = append(ret, data[ts:te]) - // line 281 "scan_string_lit.go" +//line scan_string_lit.go:278 } } } @@ -282,7 +282,7 @@ func scanStringLit(data []byte, quoted bool) [][]byte { } } - // line 89 "scan_string_lit.rl" +//line scan_string_lit.rl:89 if te < p { // Collect any leftover literal characters at the end of the input diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go index 7d557c08d1..30a08363ce 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go @@ -1,4 +1,4 @@ -// line 1 "scan_tokens.rl" +//line scan_tokens.rl:1 package hclsyntax @@ -10,1539 +10,1573 @@ import ( // This file is generated from scan_tokens.rl. DO NOT EDIT. -// line 15 "scan_tokens.go" +//line scan_tokens.go:15 var _hcltok_actions []byte = []byte{ - 0, 1, 0, 1, 2, 1, 3, 1, 4, - 1, 5, 1, 6, 1, 7, 1, 8, + 0, 1, 0, 1, 1, 1, 2, 1, 3, + 1, 4, 1, 6, 1, 7, 1, 8, 1, 9, 1, 10, 1, 11, 1, 12, - 1, 13, 1, 14, 1, 15, 1, 18, - 1, 19, 1, 20, 1, 21, 1, 22, + 1, 13, 1, 14, 1, 15, 1, 16, + 1, 17, 1, 18, 1, 19, 1, 22, 1, 23, 1, 24, 1, 25, 1, 26, - 1, 27, 1, 30, 1, 31, 1, 32, - 1, 33, 1, 34, 1, 35, 1, 36, - 1, 37, 1, 38, 1, 39, 1, 42, - 1, 43, 1, 44, 1, 45, 1, 46, - 1, 47, 1, 48, 1, 54, 1, 55, - 1, 56, 1, 57, 1, 58, 1, 59, - 1, 60, 1, 61, 1, 62, 1, 63, - 1, 64, 1, 65, 1, 66, 1, 67, - 1, 68, 1, 69, 1, 70, 1, 71, - 1, 72, 1, 73, 1, 74, 1, 75, - 1, 76, 1, 77, 1, 78, 1, 79, - 1, 80, 1, 81, 1, 82, 1, 83, - 2, 0, 1, 2, 3, 16, 2, 3, - 17, 2, 3, 28, 2, 3, 29, 2, - 3, 40, 2, 3, 41, 2, 3, 49, - 2, 3, 50, 2, 3, 51, 2, 3, - 52, 2, 3, 53, + 1, 27, 1, 28, 1, 29, 1, 30, + 1, 31, 1, 34, 1, 35, 1, 36, + 1, 37, 1, 38, 1, 39, 1, 40, + 1, 41, 1, 42, 1, 43, 1, 46, + 1, 47, 1, 48, 1, 49, 1, 50, + 1, 51, 1, 52, 1, 57, 1, 58, + 1, 59, 1, 60, 1, 61, 1, 62, + 1, 63, 1, 64, 1, 65, 1, 66, + 1, 67, 1, 68, 1, 69, 1, 70, + 1, 71, 1, 72, 1, 73, 1, 74, + 1, 75, 1, 76, 1, 77, 1, 78, + 1, 79, 1, 80, 1, 81, 1, 82, + 1, 83, 1, 84, 1, 85, 2, 0, + 15, 2, 1, 15, 2, 2, 24, 2, + 2, 28, 2, 3, 24, 2, 3, 28, + 2, 4, 5, 2, 7, 0, 2, 7, + 1, 2, 7, 20, 2, 7, 21, 2, + 7, 32, 2, 7, 33, 2, 7, 44, + 2, 7, 45, 2, 7, 53, 2, 7, + 54, 2, 7, 55, 2, 7, 56, 3, + 7, 2, 20, 3, 7, 3, 20, } var _hcltok_key_offsets []int16 = []int16{ 0, 0, 1, 2, 3, 5, 10, 14, - 16, 57, 97, 143, 144, 148, 154, 154, - 156, 158, 167, 173, 180, 181, 184, 185, - 189, 194, 203, 207, 211, 219, 221, 223, - 225, 228, 260, 262, 264, 268, 272, 275, - 286, 299, 318, 331, 347, 359, 375, 390, - 411, 421, 433, 444, 458, 473, 483, 495, - 504, 516, 518, 522, 543, 552, 562, 568, - 574, 575, 624, 626, 630, 632, 638, 645, - 653, 660, 663, 669, 673, 677, 679, 683, - 687, 691, 697, 705, 713, 719, 721, 725, - 727, 733, 737, 741, 745, 749, 754, 761, - 767, 769, 771, 775, 777, 783, 787, 791, - 801, 806, 820, 835, 837, 845, 847, 852, - 866, 871, 873, 877, 878, 882, 888, 894, - 904, 914, 925, 933, 936, 939, 943, 947, - 949, 952, 952, 955, 957, 987, 989, 991, - 995, 1000, 1004, 1009, 1011, 1013, 1015, 1024, - 
1028, 1032, 1038, 1040, 1048, 1056, 1068, 1071, - 1077, 1081, 1083, 1087, 1107, 1109, 1111, 1122, - 1128, 1130, 1132, 1134, 1138, 1144, 1150, 1152, - 1157, 1161, 1163, 1171, 1189, 1229, 1239, 1243, - 1245, 1247, 1248, 1252, 1256, 1260, 1264, 1268, - 1273, 1277, 1281, 1285, 1287, 1289, 1293, 1303, - 1307, 1309, 1313, 1317, 1321, 1334, 1336, 1338, - 1342, 1344, 1348, 1350, 1352, 1382, 1386, 1390, - 1394, 1397, 1404, 1409, 1420, 1424, 1440, 1454, - 1458, 1463, 1467, 1471, 1477, 1479, 1485, 1487, - 1491, 1493, 1499, 1504, 1509, 1519, 1521, 1523, - 1527, 1531, 1533, 1546, 1548, 1552, 1556, 1564, - 1566, 1570, 1572, 1573, 1576, 1581, 1583, 1585, - 1589, 1591, 1595, 1601, 1621, 1627, 1633, 1635, - 1636, 1646, 1647, 1655, 1662, 1664, 1667, 1669, - 1671, 1673, 1678, 1682, 1686, 1691, 1701, 1711, - 1715, 1719, 1733, 1759, 1769, 1771, 1773, 1776, - 1778, 1781, 1783, 1787, 1789, 1790, 1794, 1796, - 1799, 1806, 1814, 1816, 1818, 1822, 1824, 1830, - 1841, 1844, 1846, 1850, 1855, 1885, 1890, 1892, - 1895, 1900, 1914, 1921, 1935, 1940, 1953, 1957, - 1970, 1975, 1993, 1994, 2003, 2007, 2019, 2024, - 2031, 2038, 2045, 2047, 2051, 2073, 2078, 2079, - 2083, 2085, 2135, 2138, 2149, 2153, 2155, 2161, - 2167, 2169, 2174, 2176, 2180, 2182, 2183, 2185, - 2187, 2193, 2195, 2197, 2201, 2207, 2220, 2222, - 2228, 2232, 2240, 2251, 2259, 2262, 2292, 2298, - 2301, 2306, 2308, 2312, 2316, 2320, 2322, 2329, - 2331, 2340, 2347, 2355, 2357, 2377, 2389, 2393, - 2395, 2413, 2452, 2454, 2458, 2460, 2467, 2471, - 2499, 2501, 2503, 2505, 2507, 2510, 2512, 2516, - 2520, 2522, 2525, 2527, 2529, 2532, 2534, 2536, - 2537, 2539, 2541, 2545, 2549, 2552, 2565, 2567, - 2573, 2577, 2579, 2583, 2587, 2601, 2604, 2613, - 2615, 2619, 2625, 2625, 2627, 2629, 2638, 2644, - 2651, 2652, 2655, 2656, 2660, 2665, 2674, 2678, - 2682, 2690, 2692, 2694, 2696, 2699, 2731, 2733, - 2735, 2739, 2743, 2746, 2757, 2770, 2789, 2802, - 2818, 2830, 2846, 2861, 2882, 2892, 2904, 2915, - 2929, 2944, 2954, 2966, 2975, 2987, 2989, 2993, - 3014, 3023, 3033, 3039, 3045, 3046, 3095, 3097, - 3101, 3103, 3109, 3116, 3124, 3131, 3134, 3140, - 3144, 3148, 3150, 3154, 3158, 3162, 3168, 3176, - 3184, 3190, 3192, 3196, 3198, 3204, 3208, 3212, - 3216, 3220, 3225, 3232, 3238, 3240, 3242, 3246, - 3248, 3254, 3258, 3262, 3272, 3277, 3291, 3306, - 3308, 3316, 3318, 3323, 3337, 3342, 3344, 3348, - 3349, 3353, 3359, 3365, 3375, 3385, 3396, 3404, - 3407, 3410, 3414, 3418, 3420, 3423, 3423, 3426, - 3428, 3458, 3460, 3462, 3466, 3471, 3475, 3480, - 3482, 3484, 3486, 3495, 3499, 3503, 3509, 3511, - 3519, 3527, 3539, 3542, 3548, 3552, 3554, 3558, - 3578, 3580, 3582, 3593, 3599, 3601, 3603, 3605, - 3609, 3615, 3621, 3623, 3628, 3632, 3634, 3642, - 3660, 3700, 3710, 3714, 3716, 3718, 3719, 3723, - 3727, 3731, 3735, 3739, 3744, 3748, 3752, 3756, - 3758, 3760, 3764, 3774, 3778, 3780, 3784, 3788, - 3792, 3805, 3807, 3809, 3813, 3815, 3819, 3821, - 3823, 3853, 3857, 3861, 3865, 3868, 3875, 3880, - 3891, 3895, 3911, 3925, 3929, 3934, 3938, 3942, - 3948, 3950, 3956, 3958, 3962, 3964, 3970, 3975, - 3980, 3990, 3992, 3994, 3998, 4002, 4004, 4017, - 4019, 4023, 4027, 4035, 4037, 4041, 4043, 4044, - 4047, 4052, 4054, 4056, 4060, 4062, 4066, 4072, - 4092, 4098, 4104, 4106, 4107, 4117, 4118, 4126, - 4133, 4135, 4138, 4140, 4142, 4144, 4149, 4153, - 4157, 4162, 4172, 4182, 4186, 4190, 4204, 4230, - 4240, 4242, 4244, 4247, 4249, 4252, 4254, 4258, - 4260, 4261, 4265, 4267, 4269, 4276, 4280, 4287, - 4294, 4303, 4319, 4331, 4349, 4360, 4372, 4380, - 4398, 4406, 4436, 4439, 4449, 4459, 4471, 4482, - 
4491, 4504, 4516, 4520, 4526, 4553, 4562, 4565, - 4570, 4576, 4581, 4602, 4606, 4612, 4612, 4619, - 4628, 4636, 4639, 4643, 4649, 4655, 4658, 4662, - 4669, 4675, 4684, 4693, 4697, 4701, 4705, 4709, - 4716, 4720, 4724, 4734, 4740, 4744, 4750, 4754, - 4757, 4763, 4769, 4781, 4785, 4789, 4799, 4803, - 4814, 4816, 4818, 4822, 4834, 4839, 4863, 4867, - 4873, 4895, 4904, 4908, 4911, 4912, 4920, 4928, - 4934, 4944, 4951, 4969, 4972, 4975, 4983, 4989, - 4993, 4997, 5001, 5007, 5015, 5020, 5026, 5030, - 5038, 5045, 5049, 5056, 5062, 5070, 5078, 5084, - 5090, 5101, 5105, 5117, 5126, 5143, 5160, 5163, - 5167, 5169, 5175, 5177, 5181, 5196, 5200, 5204, - 5208, 5212, 5216, 5218, 5224, 5229, 5233, 5239, - 5246, 5249, 5267, 5269, 5314, 5320, 5326, 5330, - 5334, 5340, 5344, 5350, 5356, 5363, 5365, 5371, - 5377, 5381, 5385, 5393, 5406, 5412, 5419, 5427, - 5433, 5442, 5448, 5452, 5457, 5461, 5469, 5473, - 5477, 5507, 5513, 5519, 5525, 5531, 5538, 5544, - 5551, 5556, 5566, 5570, 5577, 5583, 5587, 5594, - 5598, 5604, 5607, 5611, 5615, 5619, 5623, 5628, - 5633, 5637, 5648, 5652, 5656, 5662, 5670, 5674, - 5691, 5695, 5701, 5711, 5717, 5723, 5726, 5731, - 5740, 5744, 5748, 5754, 5758, 5764, 5772, 5790, - 5791, 5801, 5802, 5811, 5819, 5821, 5824, 5826, - 5828, 5830, 5835, 5848, 5852, 5867, 5896, 5907, - 5909, 5913, 5917, 5922, 5926, 5928, 5935, 5939, - 5947, 5951, 5952, 5954, 5956, 5958, 5960, 5962, - 5963, 5964, 5966, 5968, 5970, 5971, 5972, 5973, - 5974, 5976, 5978, 5980, 5981, 5982, 5986, 5992, - 5992, 5994, 5996, 6005, 6011, 6018, 6019, 6022, - 6023, 6027, 6032, 6041, 6045, 6049, 6057, 6059, - 6061, 6063, 6066, 6098, 6100, 6102, 6106, 6110, - 6113, 6124, 6137, 6156, 6169, 6185, 6197, 6213, - 6228, 6249, 6259, 6271, 6282, 6296, 6311, 6321, - 6333, 6342, 6354, 6356, 6360, 6381, 6390, 6400, - 6406, 6412, 6413, 6462, 6464, 6468, 6470, 6476, - 6483, 6491, 6498, 6501, 6507, 6511, 6515, 6517, - 6521, 6525, 6529, 6535, 6543, 6551, 6557, 6559, - 6563, 6565, 6571, 6575, 6579, 6583, 6587, 6592, - 6599, 6605, 6607, 6609, 6613, 6615, 6621, 6625, - 6629, 6639, 6644, 6658, 6673, 6675, 6683, 6685, - 6690, 6704, 6709, 6711, 6715, 6716, 6720, 6726, - 6732, 6742, 6752, 6763, 6771, 6774, 6777, 6781, - 6785, 6787, 6790, 6790, 6793, 6795, 6825, 6827, - 6829, 6833, 6838, 6842, 6847, 6849, 6851, 6853, - 6862, 6866, 6870, 6876, 6878, 6886, 6894, 6906, - 6909, 6915, 6919, 6921, 6925, 6945, 6947, 6949, - 6960, 6966, 6968, 6970, 6972, 6976, 6982, 6988, - 6990, 6995, 6999, 7001, 7009, 7027, 7067, 7077, - 7081, 7083, 7085, 7086, 7090, 7094, 7098, 7102, - 7106, 7111, 7115, 7119, 7123, 7125, 7127, 7131, - 7141, 7145, 7147, 7151, 7155, 7159, 7172, 7174, - 7176, 7180, 7182, 7186, 7188, 7190, 7220, 7224, - 7228, 7232, 7235, 7242, 7247, 7258, 7262, 7278, - 7292, 7296, 7301, 7305, 7309, 7315, 7317, 7323, - 7325, 7329, 7331, 7337, 7342, 7347, 7357, 7359, - 7361, 7365, 7369, 7371, 7384, 7386, 7390, 7394, - 7402, 7404, 7408, 7410, 7411, 7414, 7419, 7421, - 7423, 7427, 7429, 7433, 7439, 7459, 7465, 7471, - 7473, 7474, 7484, 7485, 7493, 7500, 7502, 7505, - 7507, 7509, 7511, 7516, 7520, 7524, 7529, 7539, - 7549, 7553, 7557, 7571, 7597, 7607, 7609, 7611, - 7614, 7616, 7619, 7621, 7625, 7627, 7628, 7632, - 7634, 7636, 7643, 7647, 7654, 7661, 7670, 7686, - 7698, 7716, 7727, 7739, 7747, 7765, 7773, 7803, - 7806, 7816, 7826, 7838, 7849, 7858, 7871, 7883, - 7887, 7893, 7920, 7929, 7932, 7937, 7943, 7948, - 7969, 7973, 7979, 7979, 7986, 7995, 8003, 8006, - 8010, 8016, 8022, 8025, 8029, 8036, 8042, 8051, - 8060, 8064, 8068, 8072, 8076, 8083, 8087, 8091, - 
8101, 8107, 8111, 8117, 8121, 8124, 8130, 8136, - 8148, 8152, 8156, 8166, 8170, 8181, 8183, 8185, - 8189, 8201, 8206, 8230, 8234, 8240, 8262, 8271, - 8275, 8278, 8279, 8287, 8295, 8301, 8311, 8318, - 8336, 8339, 8342, 8350, 8356, 8360, 8364, 8368, - 8374, 8382, 8387, 8393, 8397, 8405, 8412, 8416, - 8423, 8429, 8437, 8445, 8451, 8457, 8468, 8472, - 8484, 8493, 8510, 8527, 8530, 8534, 8536, 8542, - 8544, 8548, 8563, 8567, 8571, 8575, 8579, 8583, - 8585, 8591, 8596, 8600, 8606, 8613, 8616, 8634, - 8636, 8681, 8687, 8693, 8697, 8701, 8707, 8711, - 8717, 8723, 8730, 8732, 8738, 8744, 8748, 8752, - 8760, 8773, 8779, 8786, 8794, 8800, 8809, 8815, - 8819, 8824, 8828, 8836, 8840, 8844, 8874, 8880, - 8886, 8892, 8898, 8905, 8911, 8918, 8923, 8933, - 8937, 8944, 8950, 8954, 8961, 8965, 8971, 8974, - 8978, 8982, 8986, 8990, 8995, 9000, 9004, 9015, - 9019, 9023, 9029, 9037, 9041, 9058, 9062, 9068, - 9078, 9084, 9090, 9093, 9098, 9107, 9111, 9115, - 9121, 9125, 9131, 9139, 9157, 9158, 9168, 9169, - 9178, 9186, 9188, 9191, 9193, 9195, 9197, 9202, - 9215, 9219, 9234, 9263, 9274, 9276, 9280, 9284, - 9289, 9293, 9295, 9302, 9306, 9314, 9318, 9393, - 9395, 9396, 9397, 9398, 9399, 9400, 9402, 9403, - 9408, 9410, 9412, 9413, 9457, 9458, 9459, 9461, - 9466, 9470, 9470, 9472, 9474, 9485, 9495, 9503, - 9504, 9506, 9507, 9511, 9515, 9525, 9529, 9536, - 9547, 9554, 9558, 9564, 9575, 9607, 9656, 9671, - 9686, 9691, 9693, 9698, 9730, 9738, 9740, 9762, - 9784, 9786, 9802, 9818, 9833, 9842, 9856, 9870, - 9886, 9887, 9888, 9889, 9890, 9892, 9894, 9896, - 9910, 9924, 9925, 9926, 9928, 9930, 9932, 9946, - 9960, 9961, 9962, 9964, 9966, 9968, 10016, 10060, - 10062, 10067, 10071, 10071, 10073, 10075, 10086, 10096, - 10104, 10105, 10107, 10108, 10112, 10116, 10126, 10130, - 10137, 10148, 10155, 10159, 10165, 10176, 10208, 10257, - 10272, 10287, 10292, 10294, 10299, 10331, 10339, 10341, - 10363, 10385, + 16, 58, 99, 145, 146, 150, 156, 156, + 158, 160, 169, 175, 182, 183, 186, 187, + 191, 196, 205, 209, 213, 221, 223, 225, + 227, 230, 262, 264, 266, 270, 274, 277, + 288, 301, 320, 333, 349, 361, 377, 392, + 413, 423, 435, 446, 460, 475, 485, 497, + 506, 518, 520, 524, 545, 554, 564, 570, + 576, 577, 626, 628, 632, 634, 640, 647, + 655, 662, 665, 671, 675, 679, 681, 685, + 689, 693, 699, 707, 715, 721, 723, 727, + 729, 735, 739, 743, 747, 751, 756, 763, + 769, 771, 773, 777, 779, 785, 789, 793, + 803, 808, 822, 837, 839, 847, 849, 854, + 868, 873, 875, 879, 880, 884, 890, 896, + 906, 916, 927, 935, 938, 941, 945, 949, + 951, 954, 954, 957, 959, 989, 991, 993, + 997, 1002, 1006, 1011, 1013, 1015, 1017, 1026, + 1030, 1034, 1040, 1042, 1050, 1058, 1070, 1073, + 1079, 1083, 1085, 1089, 1109, 1111, 1113, 1124, + 1130, 1132, 1134, 1136, 1140, 1146, 1152, 1154, + 1159, 1163, 1165, 1173, 1191, 1231, 1241, 1245, + 1247, 1249, 1250, 1254, 1258, 1262, 1266, 1270, + 1275, 1279, 1283, 1287, 1289, 1291, 1295, 1305, + 1309, 1311, 1315, 1319, 1323, 1336, 1338, 1340, + 1344, 1346, 1350, 1352, 1354, 1384, 1388, 1392, + 1396, 1399, 1406, 1411, 1422, 1426, 1442, 1456, + 1460, 1465, 1469, 1473, 1479, 1481, 1487, 1489, + 1493, 1495, 1501, 1506, 1511, 1521, 1523, 1525, + 1529, 1533, 1535, 1548, 1550, 1554, 1558, 1566, + 1568, 1572, 1574, 1575, 1578, 1583, 1585, 1587, + 1591, 1593, 1597, 1603, 1623, 1629, 1635, 1637, + 1638, 1648, 1649, 1657, 1664, 1666, 1669, 1671, + 1673, 1675, 1680, 1684, 1688, 1693, 1703, 1713, + 1717, 1721, 1735, 1761, 1771, 1773, 1775, 1778, + 1780, 1783, 1785, 1789, 1791, 1792, 1796, 1798, + 1801, 1808, 1816, 1818, 1820, 1824, 
1826, 1832, + 1843, 1846, 1848, 1852, 1857, 1887, 1892, 1894, + 1897, 1902, 1916, 1923, 1937, 1942, 1955, 1959, + 1972, 1977, 1995, 1996, 2005, 2009, 2021, 2026, + 2033, 2040, 2047, 2049, 2053, 2075, 2080, 2081, + 2085, 2087, 2137, 2140, 2151, 2155, 2157, 2163, + 2169, 2171, 2176, 2178, 2182, 2184, 2185, 2187, + 2189, 2195, 2197, 2199, 2203, 2209, 2222, 2224, + 2230, 2234, 2242, 2253, 2261, 2264, 2294, 2300, + 2303, 2308, 2310, 2314, 2318, 2322, 2324, 2331, + 2333, 2342, 2349, 2357, 2359, 2379, 2391, 2395, + 2397, 2415, 2454, 2456, 2460, 2462, 2469, 2473, + 2501, 2503, 2505, 2507, 2509, 2512, 2514, 2518, + 2522, 2524, 2527, 2529, 2531, 2534, 2536, 2538, + 2539, 2541, 2543, 2547, 2551, 2554, 2567, 2569, + 2575, 2579, 2581, 2585, 2589, 2603, 2606, 2615, + 2617, 2621, 2627, 2627, 2629, 2631, 2640, 2646, + 2653, 2654, 2657, 2658, 2662, 2667, 2676, 2680, + 2684, 2692, 2694, 2696, 2698, 2701, 2733, 2735, + 2737, 2741, 2745, 2748, 2759, 2772, 2791, 2804, + 2820, 2832, 2848, 2863, 2884, 2894, 2906, 2917, + 2931, 2946, 2956, 2968, 2977, 2989, 2991, 2995, + 3016, 3025, 3035, 3041, 3047, 3048, 3097, 3099, + 3103, 3105, 3111, 3118, 3126, 3133, 3136, 3142, + 3146, 3150, 3152, 3156, 3160, 3164, 3170, 3178, + 3186, 3192, 3194, 3198, 3200, 3206, 3210, 3214, + 3218, 3222, 3227, 3234, 3240, 3242, 3244, 3248, + 3250, 3256, 3260, 3264, 3274, 3279, 3293, 3308, + 3310, 3318, 3320, 3325, 3339, 3344, 3346, 3350, + 3351, 3355, 3361, 3367, 3377, 3387, 3398, 3406, + 3409, 3412, 3416, 3420, 3422, 3425, 3425, 3428, + 3430, 3460, 3462, 3464, 3468, 3473, 3477, 3482, + 3484, 3486, 3488, 3497, 3501, 3505, 3511, 3513, + 3521, 3529, 3541, 3544, 3550, 3554, 3556, 3560, + 3580, 3582, 3584, 3595, 3601, 3603, 3605, 3607, + 3611, 3617, 3623, 3625, 3630, 3634, 3636, 3644, + 3662, 3702, 3712, 3716, 3718, 3720, 3721, 3725, + 3729, 3733, 3737, 3741, 3746, 3750, 3754, 3758, + 3760, 3762, 3766, 3776, 3780, 3782, 3786, 3790, + 3794, 3807, 3809, 3811, 3815, 3817, 3821, 3823, + 3825, 3855, 3859, 3863, 3867, 3870, 3877, 3882, + 3893, 3897, 3913, 3927, 3931, 3936, 3940, 3944, + 3950, 3952, 3958, 3960, 3964, 3966, 3972, 3977, + 3982, 3992, 3994, 3996, 4000, 4004, 4006, 4019, + 4021, 4025, 4029, 4037, 4039, 4043, 4045, 4046, + 4049, 4054, 4056, 4058, 4062, 4064, 4068, 4074, + 4094, 4100, 4106, 4108, 4109, 4119, 4120, 4128, + 4135, 4137, 4140, 4142, 4144, 4146, 4151, 4155, + 4159, 4164, 4174, 4184, 4188, 4192, 4206, 4232, + 4242, 4244, 4246, 4249, 4251, 4254, 4256, 4260, + 4262, 4263, 4267, 4269, 4271, 4278, 4282, 4289, + 4296, 4305, 4321, 4333, 4351, 4362, 4374, 4382, + 4400, 4408, 4438, 4441, 4451, 4461, 4473, 4484, + 4493, 4506, 4518, 4522, 4528, 4555, 4564, 4567, + 4572, 4578, 4583, 4604, 4608, 4614, 4614, 4621, + 4630, 4638, 4641, 4645, 4651, 4657, 4660, 4664, + 4671, 4677, 4686, 4695, 4699, 4703, 4707, 4711, + 4718, 4722, 4726, 4736, 4742, 4746, 4752, 4756, + 4759, 4765, 4771, 4783, 4787, 4791, 4801, 4805, + 4816, 4818, 4820, 4824, 4836, 4841, 4865, 4869, + 4875, 4897, 4906, 4910, 4913, 4914, 4922, 4930, + 4936, 4946, 4953, 4971, 4974, 4977, 4985, 4991, + 4995, 4999, 5003, 5009, 5017, 5022, 5028, 5032, + 5040, 5047, 5051, 5058, 5064, 5072, 5080, 5086, + 5092, 5103, 5107, 5119, 5128, 5145, 5162, 5165, + 5169, 5171, 5177, 5179, 5183, 5198, 5202, 5206, + 5210, 5214, 5218, 5220, 5226, 5231, 5235, 5241, + 5248, 5251, 5269, 5271, 5316, 5322, 5328, 5332, + 5336, 5342, 5346, 5352, 5358, 5365, 5367, 5373, + 5379, 5383, 5387, 5395, 5408, 5414, 5421, 5429, + 5435, 5444, 5450, 5454, 5459, 5463, 5471, 5475, + 5479, 5509, 5515, 5521, 5527, 5533, 
5540, 5546, + 5553, 5558, 5568, 5572, 5579, 5585, 5589, 5596, + 5600, 5606, 5609, 5613, 5617, 5621, 5625, 5630, + 5635, 5639, 5650, 5654, 5658, 5664, 5672, 5676, + 5693, 5697, 5703, 5713, 5719, 5725, 5728, 5733, + 5742, 5746, 5750, 5756, 5760, 5766, 5774, 5792, + 5793, 5803, 5804, 5813, 5821, 5823, 5826, 5828, + 5830, 5832, 5837, 5850, 5854, 5869, 5898, 5909, + 5911, 5915, 5919, 5924, 5928, 5930, 5937, 5941, + 5949, 5953, 5954, 5955, 5957, 5959, 5961, 5963, + 5965, 5966, 5967, 5968, 5970, 5972, 5974, 5975, + 5976, 5977, 5978, 5980, 5982, 5984, 5985, 5986, + 5990, 5996, 5996, 5998, 6000, 6009, 6015, 6022, + 6023, 6026, 6027, 6031, 6036, 6045, 6049, 6053, + 6061, 6063, 6065, 6067, 6070, 6102, 6104, 6106, + 6110, 6114, 6117, 6128, 6141, 6160, 6173, 6189, + 6201, 6217, 6232, 6253, 6263, 6275, 6286, 6300, + 6315, 6325, 6337, 6346, 6358, 6360, 6364, 6385, + 6394, 6404, 6410, 6416, 6417, 6466, 6468, 6472, + 6474, 6480, 6487, 6495, 6502, 6505, 6511, 6515, + 6519, 6521, 6525, 6529, 6533, 6539, 6547, 6555, + 6561, 6563, 6567, 6569, 6575, 6579, 6583, 6587, + 6591, 6596, 6603, 6609, 6611, 6613, 6617, 6619, + 6625, 6629, 6633, 6643, 6648, 6662, 6677, 6679, + 6687, 6689, 6694, 6708, 6713, 6715, 6719, 6720, + 6724, 6730, 6736, 6746, 6756, 6767, 6775, 6778, + 6781, 6785, 6789, 6791, 6794, 6794, 6797, 6799, + 6829, 6831, 6833, 6837, 6842, 6846, 6851, 6853, + 6855, 6857, 6866, 6870, 6874, 6880, 6882, 6890, + 6898, 6910, 6913, 6919, 6923, 6925, 6929, 6949, + 6951, 6953, 6964, 6970, 6972, 6974, 6976, 6980, + 6986, 6992, 6994, 6999, 7003, 7005, 7013, 7031, + 7071, 7081, 7085, 7087, 7089, 7090, 7094, 7098, + 7102, 7106, 7110, 7115, 7119, 7123, 7127, 7129, + 7131, 7135, 7145, 7149, 7151, 7155, 7159, 7163, + 7176, 7178, 7180, 7184, 7186, 7190, 7192, 7194, + 7224, 7228, 7232, 7236, 7239, 7246, 7251, 7262, + 7266, 7282, 7296, 7300, 7305, 7309, 7313, 7319, + 7321, 7327, 7329, 7333, 7335, 7341, 7346, 7351, + 7361, 7363, 7365, 7369, 7373, 7375, 7388, 7390, + 7394, 7398, 7406, 7408, 7412, 7414, 7415, 7418, + 7423, 7425, 7427, 7431, 7433, 7437, 7443, 7463, + 7469, 7475, 7477, 7478, 7488, 7489, 7497, 7504, + 7506, 7509, 7511, 7513, 7515, 7520, 7524, 7528, + 7533, 7543, 7553, 7557, 7561, 7575, 7601, 7611, + 7613, 7615, 7618, 7620, 7623, 7625, 7629, 7631, + 7632, 7636, 7638, 7640, 7647, 7651, 7658, 7665, + 7674, 7690, 7702, 7720, 7731, 7743, 7751, 7769, + 7777, 7807, 7810, 7820, 7830, 7842, 7853, 7862, + 7875, 7887, 7891, 7897, 7924, 7933, 7936, 7941, + 7947, 7952, 7973, 7977, 7983, 7983, 7990, 7999, + 8007, 8010, 8014, 8020, 8026, 8029, 8033, 8040, + 8046, 8055, 8064, 8068, 8072, 8076, 8080, 8087, + 8091, 8095, 8105, 8111, 8115, 8121, 8125, 8128, + 8134, 8140, 8152, 8156, 8160, 8170, 8174, 8185, + 8187, 8189, 8193, 8205, 8210, 8234, 8238, 8244, + 8266, 8275, 8279, 8282, 8283, 8291, 8299, 8305, + 8315, 8322, 8340, 8343, 8346, 8354, 8360, 8364, + 8368, 8372, 8378, 8386, 8391, 8397, 8401, 8409, + 8416, 8420, 8427, 8433, 8441, 8449, 8455, 8461, + 8472, 8476, 8488, 8497, 8514, 8531, 8534, 8538, + 8540, 8546, 8548, 8552, 8567, 8571, 8575, 8579, + 8583, 8587, 8589, 8595, 8600, 8604, 8610, 8617, + 8620, 8638, 8640, 8685, 8691, 8697, 8701, 8705, + 8711, 8715, 8721, 8727, 8734, 8736, 8742, 8748, + 8752, 8756, 8764, 8777, 8783, 8790, 8798, 8804, + 8813, 8819, 8823, 8828, 8832, 8840, 8844, 8848, + 8878, 8884, 8890, 8896, 8902, 8909, 8915, 8922, + 8927, 8937, 8941, 8948, 8954, 8958, 8965, 8969, + 8975, 8978, 8982, 8986, 8990, 8994, 8999, 9004, + 9008, 9019, 9023, 9027, 9033, 9041, 9045, 9062, + 9066, 9072, 9082, 9088, 9094, 9097, 
9102, 9111, + 9115, 9119, 9125, 9129, 9135, 9143, 9161, 9162, + 9172, 9173, 9182, 9190, 9192, 9195, 9197, 9199, + 9201, 9206, 9219, 9223, 9238, 9267, 9278, 9280, + 9284, 9288, 9293, 9297, 9299, 9306, 9310, 9318, + 9322, 9397, 9399, 9400, 9401, 9402, 9403, 9404, + 9406, 9411, 9413, 9415, 9416, 9460, 9461, 9462, + 9464, 9469, 9473, 9473, 9475, 9477, 9488, 9498, + 9506, 9507, 9509, 9510, 9514, 9518, 9528, 9532, + 9539, 9550, 9557, 9561, 9567, 9578, 9610, 9659, + 9674, 9689, 9694, 9696, 9701, 9733, 9741, 9743, + 9765, 9787, 9789, 9805, 9821, 9837, 9853, 9868, + 9878, 9895, 9912, 9929, 9945, 9955, 9972, 9988, + 10004, 10020, 10036, 10052, 10068, 10084, 10085, 10086, + 10087, 10088, 10090, 10092, 10094, 10108, 10122, 10136, + 10150, 10151, 10152, 10154, 10156, 10158, 10172, 10186, + 10187, 10188, 10190, 10192, 10194, 10243, 10287, 10289, + 10294, 10298, 10298, 10300, 10302, 10313, 10323, 10331, + 10332, 10334, 10335, 10339, 10343, 10353, 10357, 10364, + 10375, 10382, 10386, 10392, 10403, 10435, 10484, 10499, + 10514, 10519, 10521, 10526, 10558, 10566, 10568, 10590, + 10612, } var _hcltok_trans_keys []byte = []byte{ 10, 46, 42, 42, 47, 46, 69, 101, 48, 57, 43, 45, 48, 57, 48, 57, - 45, 194, 195, 198, 199, 203, 205, 206, - 207, 210, 212, 213, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 228, 233, 234, 237, 239, 240, 65, - 90, 97, 122, 196, 202, 208, 218, 229, - 236, 194, 195, 198, 199, 203, 205, 206, - 207, 210, 212, 213, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 228, 233, 234, 237, 239, 240, 65, - 90, 97, 122, 196, 202, 208, 218, 229, - 236, 10, 13, 45, 95, 194, 195, 198, - 199, 203, 204, 205, 206, 207, 210, 212, - 213, 214, 215, 216, 217, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 228, 233, - 234, 237, 239, 240, 243, 48, 57, 65, - 90, 97, 122, 196, 218, 229, 236, 10, - 170, 181, 183, 186, 128, 150, 152, 182, - 184, 255, 192, 255, 128, 255, 173, 130, - 133, 146, 159, 165, 171, 175, 255, 181, - 190, 184, 185, 192, 255, 140, 134, 138, - 142, 161, 163, 255, 182, 130, 136, 137, - 176, 151, 152, 154, 160, 190, 136, 144, - 192, 255, 135, 129, 130, 132, 133, 144, - 170, 176, 178, 144, 154, 160, 191, 128, - 169, 174, 255, 148, 169, 157, 158, 189, - 190, 192, 255, 144, 255, 139, 140, 178, - 255, 186, 128, 181, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, - 172, 173, 174, 175, 176, 177, 178, 179, - 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 128, 173, 128, 155, - 160, 180, 182, 189, 148, 161, 163, 255, - 176, 164, 165, 132, 169, 177, 141, 142, - 145, 146, 179, 181, 186, 187, 158, 133, - 134, 137, 138, 143, 150, 152, 155, 164, - 165, 178, 255, 188, 129, 131, 133, 138, - 143, 144, 147, 168, 170, 176, 178, 179, - 181, 182, 184, 185, 190, 255, 157, 131, - 134, 137, 138, 142, 144, 146, 152, 159, - 165, 182, 255, 129, 131, 133, 141, 143, - 145, 147, 168, 170, 176, 178, 179, 181, - 185, 188, 255, 134, 138, 142, 143, 145, - 159, 164, 165, 176, 184, 186, 255, 129, - 131, 133, 140, 143, 144, 147, 168, 170, - 176, 178, 179, 181, 185, 188, 191, 177, - 128, 132, 135, 136, 139, 141, 150, 151, - 156, 157, 159, 163, 166, 175, 156, 130, - 131, 133, 138, 142, 144, 146, 149, 153, - 154, 158, 159, 163, 164, 168, 170, 174, - 185, 190, 191, 144, 151, 128, 130, 134, - 136, 138, 141, 166, 175, 128, 131, 133, - 140, 142, 144, 146, 168, 170, 185, 189, - 255, 133, 137, 151, 142, 148, 155, 159, - 164, 165, 176, 255, 128, 131, 133, 140, - 142, 144, 146, 168, 170, 179, 181, 185, - 188, 191, 158, 128, 132, 134, 136, 138, - 141, 149, 150, 160, 163, 
166, 175, 177, - 178, 129, 131, 133, 140, 142, 144, 146, - 186, 189, 255, 133, 137, 143, 147, 152, - 158, 164, 165, 176, 185, 192, 255, 189, - 130, 131, 133, 150, 154, 177, 179, 187, - 138, 150, 128, 134, 143, 148, 152, 159, - 166, 175, 178, 179, 129, 186, 128, 142, - 144, 153, 132, 138, 141, 165, 167, 129, - 130, 135, 136, 148, 151, 153, 159, 161, - 163, 170, 171, 173, 185, 187, 189, 134, - 128, 132, 136, 141, 144, 153, 156, 159, - 128, 181, 183, 185, 152, 153, 160, 169, - 190, 191, 128, 135, 137, 172, 177, 191, - 128, 132, 134, 151, 153, 188, 134, 128, + 45, 95, 194, 195, 198, 199, 203, 205, + 206, 207, 210, 212, 213, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 233, 234, 237, 239, 240, + 65, 90, 97, 122, 196, 202, 208, 218, + 229, 236, 95, 194, 195, 198, 199, 203, + 205, 206, 207, 210, 212, 213, 214, 215, + 216, 217, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 233, 234, 237, 239, + 240, 65, 90, 97, 122, 196, 202, 208, + 218, 229, 236, 10, 13, 45, 95, 194, + 195, 198, 199, 203, 204, 205, 206, 207, + 210, 212, 213, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 233, 234, 237, 239, 240, 243, 48, + 57, 65, 90, 97, 122, 196, 218, 229, + 236, 10, 170, 181, 183, 186, 128, 150, + 152, 182, 184, 255, 192, 255, 0, 127, + 173, 130, 133, 146, 159, 165, 171, 175, + 255, 181, 190, 184, 185, 192, 255, 140, + 134, 138, 142, 161, 163, 255, 182, 130, + 136, 137, 176, 151, 152, 154, 160, 190, + 136, 144, 192, 255, 135, 129, 130, 132, + 133, 144, 170, 176, 178, 144, 154, 160, + 191, 128, 169, 174, 255, 148, 169, 157, + 158, 189, 190, 192, 255, 144, 255, 139, + 140, 178, 255, 186, 128, 181, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 128, 173, + 128, 155, 160, 180, 182, 189, 148, 161, + 163, 255, 176, 164, 165, 132, 169, 177, + 141, 142, 145, 146, 179, 181, 186, 187, + 158, 133, 134, 137, 138, 143, 150, 152, + 155, 164, 165, 178, 255, 188, 129, 131, + 133, 138, 143, 144, 147, 168, 170, 176, + 178, 179, 181, 182, 184, 185, 190, 255, + 157, 131, 134, 137, 138, 142, 144, 146, + 152, 159, 165, 182, 255, 129, 131, 133, + 141, 143, 145, 147, 168, 170, 176, 178, + 179, 181, 185, 188, 255, 134, 138, 142, + 143, 145, 159, 164, 165, 176, 184, 186, + 255, 129, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 177, 128, 132, 135, 136, 139, 141, + 150, 151, 156, 157, 159, 163, 166, 175, + 156, 130, 131, 133, 138, 142, 144, 146, + 149, 153, 154, 158, 159, 163, 164, 168, + 170, 174, 185, 190, 191, 144, 151, 128, + 130, 134, 136, 138, 141, 166, 175, 128, + 131, 133, 140, 142, 144, 146, 168, 170, + 185, 189, 255, 133, 137, 151, 142, 148, + 155, 159, 164, 165, 176, 255, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 179, + 181, 185, 188, 191, 158, 128, 132, 134, + 136, 138, 141, 149, 150, 160, 163, 166, + 175, 177, 178, 129, 131, 133, 140, 142, + 144, 146, 186, 189, 255, 133, 137, 143, + 147, 152, 158, 164, 165, 176, 185, 192, + 255, 189, 130, 131, 133, 150, 154, 177, + 179, 187, 138, 150, 128, 134, 143, 148, + 152, 159, 166, 175, 178, 179, 129, 186, + 128, 142, 144, 153, 132, 138, 141, 165, + 167, 129, 130, 135, 136, 148, 151, 153, + 159, 161, 163, 170, 171, 173, 185, 187, + 189, 134, 128, 132, 136, 141, 144, 153, + 156, 159, 128, 181, 183, 185, 152, 153, + 160, 169, 190, 191, 128, 135, 137, 172, + 177, 191, 128, 132, 134, 151, 153, 188, + 134, 128, 129, 130, 131, 137, 138, 139, + 140, 141, 142, 143, 144, 153, 
154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 173, + 175, 176, 177, 178, 179, 181, 182, 183, + 188, 189, 190, 191, 132, 152, 172, 184, + 185, 187, 128, 191, 128, 137, 144, 255, + 158, 159, 134, 187, 136, 140, 142, 143, + 137, 151, 153, 142, 143, 158, 159, 137, + 177, 142, 143, 182, 183, 191, 255, 128, + 130, 133, 136, 150, 152, 255, 145, 150, + 151, 155, 156, 160, 168, 178, 255, 128, + 143, 160, 255, 182, 183, 190, 255, 129, + 255, 173, 174, 192, 255, 129, 154, 160, + 255, 171, 173, 185, 255, 128, 140, 142, + 148, 160, 180, 128, 147, 160, 172, 174, + 176, 178, 179, 148, 150, 152, 155, 158, + 159, 170, 255, 139, 141, 144, 153, 160, + 255, 184, 255, 128, 170, 176, 255, 182, + 255, 128, 158, 160, 171, 176, 187, 134, + 173, 176, 180, 128, 171, 176, 255, 138, + 143, 155, 255, 128, 155, 160, 255, 159, + 189, 190, 192, 255, 167, 128, 137, 144, + 153, 176, 189, 140, 143, 154, 170, 180, + 255, 180, 255, 128, 183, 128, 137, 141, + 189, 128, 136, 144, 146, 148, 182, 184, + 185, 128, 181, 187, 191, 150, 151, 158, + 159, 152, 154, 156, 158, 134, 135, 142, + 143, 190, 255, 190, 128, 180, 182, 188, + 130, 132, 134, 140, 144, 147, 150, 155, + 160, 172, 178, 180, 182, 188, 128, 129, + 130, 131, 132, 133, 134, 176, 177, 178, + 179, 180, 181, 182, 183, 191, 255, 129, + 147, 149, 176, 178, 190, 192, 255, 144, + 156, 161, 144, 156, 165, 176, 130, 135, + 149, 164, 166, 168, 138, 147, 152, 157, + 170, 185, 188, 191, 142, 133, 137, 160, + 255, 137, 255, 128, 174, 176, 255, 159, + 165, 170, 180, 255, 167, 173, 128, 165, + 176, 255, 168, 174, 176, 190, 192, 255, + 128, 150, 160, 166, 168, 174, 176, 182, + 184, 190, 128, 134, 136, 142, 144, 150, + 152, 158, 160, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 145, 255, 133, + 135, 161, 175, 177, 181, 184, 188, 160, + 151, 152, 187, 192, 255, 133, 173, 177, + 255, 143, 159, 187, 255, 176, 191, 182, + 183, 184, 191, 192, 255, 150, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 255, 141, 255, 144, + 189, 141, 143, 172, 255, 191, 128, 175, + 180, 189, 151, 159, 162, 255, 175, 137, + 138, 184, 255, 183, 255, 168, 255, 128, + 179, 188, 134, 143, 154, 159, 184, 186, + 190, 255, 128, 173, 176, 255, 148, 159, + 189, 255, 129, 142, 154, 159, 191, 255, + 128, 182, 128, 141, 144, 153, 160, 182, + 186, 255, 128, 130, 155, 157, 160, 175, + 178, 182, 129, 134, 137, 142, 145, 150, + 160, 166, 168, 174, 176, 255, 155, 166, + 175, 128, 170, 172, 173, 176, 185, 158, + 159, 160, 255, 164, 175, 135, 138, 188, + 255, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 165, 186, 174, 175, 154, + 255, 190, 128, 134, 147, 151, 157, 168, + 170, 182, 184, 188, 128, 129, 131, 132, + 134, 255, 147, 255, 190, 255, 144, 145, + 136, 175, 188, 255, 128, 143, 160, 175, + 179, 180, 141, 143, 176, 180, 182, 255, + 189, 255, 191, 144, 153, 161, 186, 129, + 154, 166, 255, 191, 255, 130, 135, 138, + 143, 146, 151, 154, 156, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 161, 169, 128, + 129, 130, 131, 133, 135, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 152, 156, 157, 160, 161, 162, 163, + 164, 166, 168, 169, 170, 171, 172, 173, + 174, 176, 177, 153, 155, 178, 179, 128, + 139, 141, 166, 168, 186, 188, 189, 191, + 255, 142, 143, 158, 255, 187, 255, 128, + 180, 189, 128, 156, 160, 255, 145, 159, + 161, 255, 128, 159, 176, 255, 139, 143, + 187, 255, 128, 
157, 160, 255, 144, 132, + 135, 150, 255, 158, 159, 170, 175, 148, + 151, 188, 255, 128, 167, 176, 255, 164, + 255, 183, 255, 128, 149, 160, 167, 136, + 188, 128, 133, 138, 181, 183, 184, 191, + 255, 150, 159, 183, 255, 128, 158, 160, + 178, 180, 181, 128, 149, 160, 185, 128, + 183, 190, 191, 191, 128, 131, 133, 134, + 140, 147, 149, 151, 153, 179, 184, 186, + 160, 188, 128, 156, 128, 135, 137, 166, + 128, 181, 128, 149, 160, 178, 128, 145, + 128, 178, 129, 130, 131, 132, 133, 135, + 136, 138, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 155, 156, + 162, 163, 171, 176, 177, 178, 128, 134, + 135, 165, 176, 190, 144, 168, 176, 185, + 128, 180, 182, 191, 182, 144, 179, 155, + 133, 137, 141, 143, 157, 255, 190, 128, + 145, 147, 183, 136, 128, 134, 138, 141, + 143, 157, 159, 168, 176, 255, 171, 175, + 186, 255, 128, 131, 133, 140, 143, 144, + 147, 168, 170, 176, 178, 179, 181, 185, + 188, 191, 144, 151, 128, 132, 135, 136, + 139, 141, 157, 163, 166, 172, 176, 180, + 128, 138, 144, 153, 134, 136, 143, 154, + 255, 128, 181, 184, 255, 129, 151, 158, + 255, 129, 131, 133, 143, 154, 255, 128, + 137, 128, 153, 157, 171, 176, 185, 160, + 255, 170, 190, 192, 255, 128, 184, 128, + 136, 138, 182, 184, 191, 128, 144, 153, + 178, 255, 168, 144, 145, 183, 255, 128, + 142, 145, 149, 129, 141, 144, 146, 147, + 148, 175, 255, 132, 255, 128, 144, 129, + 143, 144, 153, 145, 152, 135, 255, 160, + 168, 169, 171, 172, 173, 174, 188, 189, + 190, 191, 161, 167, 185, 255, 128, 158, + 160, 169, 144, 173, 176, 180, 128, 131, + 144, 153, 163, 183, 189, 255, 144, 255, + 133, 143, 191, 255, 143, 159, 160, 128, + 129, 255, 159, 160, 171, 172, 255, 173, + 255, 179, 255, 128, 176, 177, 178, 128, + 129, 171, 175, 189, 255, 128, 136, 144, + 153, 157, 158, 133, 134, 137, 144, 145, + 146, 147, 148, 149, 154, 155, 156, 157, + 158, 159, 168, 169, 170, 150, 153, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 149, 157, 173, + 186, 188, 160, 161, 163, 164, 167, 168, + 132, 134, 149, 157, 186, 139, 140, 191, + 255, 134, 128, 132, 138, 144, 146, 255, + 166, 167, 129, 155, 187, 149, 181, 143, + 175, 137, 169, 131, 140, 141, 192, 255, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 160, 163, + 164, 165, 184, 185, 186, 161, 162, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 133, 143, 151, 255, 139, 143, 154, + 255, 164, 167, 185, 187, 128, 131, 133, + 159, 161, 162, 169, 178, 180, 183, 130, + 135, 137, 139, 148, 151, 153, 155, 157, + 159, 164, 190, 141, 143, 145, 146, 161, + 162, 167, 170, 172, 178, 180, 183, 185, + 188, 128, 137, 139, 155, 161, 163, 165, + 169, 171, 187, 155, 156, 151, 255, 156, + 157, 160, 181, 255, 186, 187, 255, 162, + 255, 160, 168, 161, 167, 158, 255, 160, + 132, 135, 133, 134, 176, 255, 170, 181, + 186, 191, 176, 180, 182, 183, 186, 189, + 134, 140, 136, 138, 142, 161, 163, 255, + 130, 137, 136, 255, 144, 170, 176, 178, + 160, 191, 128, 138, 174, 175, 177, 255, + 148, 150, 164, 167, 173, 176, 185, 189, + 190, 192, 255, 144, 146, 175, 141, 255, + 166, 176, 178, 255, 186, 138, 170, 180, + 181, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 184, + 186, 187, 188, 189, 190, 183, 185, 154, + 164, 168, 128, 149, 128, 152, 189, 132, + 185, 144, 152, 161, 177, 255, 169, 177, + 129, 132, 141, 142, 145, 146, 179, 181, + 186, 188, 190, 255, 142, 156, 157, 159, + 161, 176, 177, 133, 138, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 182, 184, + 185, 158, 153, 156, 178, 180, 189, 133, + 
141, 143, 145, 147, 168, 170, 176, 178, + 179, 181, 185, 144, 185, 160, 161, 189, + 133, 140, 143, 144, 147, 168, 170, 176, + 178, 179, 181, 185, 177, 156, 157, 159, + 161, 131, 156, 133, 138, 142, 144, 146, + 149, 153, 154, 158, 159, 163, 164, 168, + 170, 174, 185, 144, 189, 133, 140, 142, + 144, 146, 168, 170, 185, 152, 154, 160, + 161, 128, 189, 133, 140, 142, 144, 146, + 168, 170, 179, 181, 185, 158, 160, 161, + 177, 178, 189, 133, 140, 142, 144, 146, + 186, 142, 148, 150, 159, 161, 186, 191, + 189, 133, 150, 154, 177, 179, 187, 128, + 134, 129, 176, 178, 179, 132, 138, 141, + 165, 167, 189, 129, 130, 135, 136, 148, + 151, 153, 159, 161, 163, 170, 171, 173, + 176, 178, 179, 134, 128, 132, 156, 159, + 128, 128, 135, 137, 172, 136, 140, 128, 129, 130, 131, 137, 138, 139, 140, 141, 142, 143, 144, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 173, 175, 176, - 177, 178, 179, 181, 182, 183, 188, 189, - 190, 191, 132, 152, 172, 184, 185, 187, - 128, 191, 128, 137, 144, 255, 158, 159, - 134, 187, 136, 140, 142, 143, 137, 151, - 153, 142, 143, 158, 159, 137, 177, 142, - 143, 182, 183, 191, 255, 128, 130, 133, - 136, 150, 152, 255, 145, 150, 151, 155, - 156, 160, 168, 178, 255, 128, 143, 160, - 255, 182, 183, 190, 255, 129, 255, 173, - 174, 192, 255, 129, 154, 160, 255, 171, - 173, 185, 255, 128, 140, 142, 148, 160, - 180, 128, 147, 160, 172, 174, 176, 178, - 179, 148, 150, 152, 155, 158, 159, 170, - 255, 139, 141, 144, 153, 160, 255, 184, - 255, 128, 170, 176, 255, 182, 255, 128, - 158, 160, 171, 176, 187, 134, 173, 176, - 180, 128, 171, 176, 255, 138, 143, 155, - 255, 128, 155, 160, 255, 159, 189, 190, - 192, 255, 167, 128, 137, 144, 153, 176, - 189, 140, 143, 154, 170, 180, 255, 180, - 255, 128, 183, 128, 137, 141, 189, 128, - 136, 144, 146, 148, 182, 184, 185, 128, - 181, 187, 191, 150, 151, 158, 159, 152, - 154, 156, 158, 134, 135, 142, 143, 190, - 255, 190, 128, 180, 182, 188, 130, 132, - 134, 140, 144, 147, 150, 155, 160, 172, - 178, 180, 182, 188, 128, 129, 130, 131, - 132, 133, 134, 176, 177, 178, 179, 180, - 181, 182, 183, 191, 255, 129, 147, 149, - 176, 178, 190, 192, 255, 144, 156, 161, - 144, 156, 165, 176, 130, 135, 149, 164, - 166, 168, 138, 147, 152, 157, 170, 185, - 188, 191, 142, 133, 137, 160, 255, 137, - 255, 128, 174, 176, 255, 159, 165, 170, - 180, 255, 167, 173, 128, 165, 176, 255, - 168, 174, 176, 190, 192, 255, 128, 150, - 160, 166, 168, 174, 176, 182, 184, 190, - 128, 134, 136, 142, 144, 150, 152, 158, - 160, 191, 128, 129, 130, 131, 132, 133, + 166, 167, 168, 169, 170, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 184, 188, 189, 190, 191, 132, 152, 185, + 187, 191, 128, 170, 161, 144, 149, 154, + 157, 165, 166, 174, 176, 181, 255, 130, + 141, 143, 159, 155, 255, 128, 140, 142, + 145, 160, 177, 128, 145, 160, 172, 174, + 176, 151, 156, 170, 128, 168, 176, 255, + 138, 255, 128, 150, 160, 255, 149, 255, + 167, 133, 179, 133, 139, 131, 160, 174, + 175, 186, 255, 166, 255, 128, 163, 141, + 143, 154, 189, 169, 172, 174, 177, 181, + 182, 129, 130, 132, 133, 134, 176, 177, + 178, 179, 180, 181, 182, 183, 177, 191, + 165, 170, 175, 177, 180, 255, 168, 174, + 176, 255, 128, 134, 136, 142, 144, 150, + 152, 158, 128, 129, 130, 131, 132, 133, 134, 135, 144, 145, 255, 133, 135, 161, - 175, 177, 181, 184, 188, 160, 151, 152, - 187, 192, 255, 133, 173, 177, 255, 143, - 159, 187, 255, 176, 191, 182, 183, 184, - 191, 192, 255, 150, 255, 128, 146, 147, - 148, 152, 153, 154, 155, 156, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 
167, - 168, 169, 170, 171, 172, 173, 174, 175, - 176, 129, 255, 141, 255, 144, 189, 141, - 143, 172, 255, 191, 128, 175, 180, 189, - 151, 159, 162, 255, 175, 137, 138, 184, - 255, 183, 255, 168, 255, 128, 179, 188, - 134, 143, 154, 159, 184, 186, 190, 255, - 128, 173, 176, 255, 148, 159, 189, 255, - 129, 142, 154, 159, 191, 255, 128, 182, - 128, 141, 144, 153, 160, 182, 186, 255, - 128, 130, 155, 157, 160, 175, 178, 182, - 129, 134, 137, 142, 145, 150, 160, 166, - 168, 174, 176, 255, 155, 166, 175, 128, - 170, 172, 173, 176, 185, 158, 159, 160, - 255, 164, 175, 135, 138, 188, 255, 164, + 169, 177, 181, 184, 188, 160, 151, 154, + 128, 146, 147, 148, 152, 153, 154, 155, + 156, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 129, 255, 141, 143, + 160, 169, 172, 255, 191, 128, 174, 130, + 134, 139, 163, 255, 130, 179, 187, 189, + 178, 183, 138, 165, 176, 255, 135, 159, + 189, 255, 132, 178, 143, 160, 164, 166, + 175, 186, 190, 128, 168, 186, 128, 130, + 132, 139, 160, 182, 190, 255, 176, 178, + 180, 183, 184, 190, 255, 128, 130, 155, + 157, 160, 170, 178, 180, 128, 162, 164, 169, 171, 172, 173, 174, 175, 180, 181, - 182, 183, 184, 185, 187, 188, 189, 190, - 191, 165, 186, 174, 175, 154, 255, 190, - 128, 134, 147, 151, 157, 168, 170, 182, - 184, 188, 128, 129, 131, 132, 134, 255, - 147, 255, 190, 255, 144, 145, 136, 175, - 188, 255, 128, 143, 160, 175, 179, 180, - 141, 143, 176, 180, 182, 255, 189, 255, - 191, 144, 153, 161, 186, 129, 154, 166, - 255, 191, 255, 130, 135, 138, 143, 146, - 151, 154, 156, 144, 145, 146, 147, 148, - 150, 151, 152, 155, 157, 158, 160, 170, - 171, 172, 175, 161, 169, 128, 129, 130, - 131, 133, 135, 138, 139, 140, 141, 142, - 143, 144, 145, 146, 147, 148, 149, 152, - 156, 157, 160, 161, 162, 163, 164, 166, - 168, 169, 170, 171, 172, 173, 174, 176, - 177, 153, 155, 178, 179, 128, 139, 141, - 166, 168, 186, 188, 189, 191, 255, 142, - 143, 158, 255, 187, 255, 128, 180, 189, - 128, 156, 160, 255, 145, 159, 161, 255, - 128, 159, 176, 255, 139, 143, 187, 255, - 128, 157, 160, 255, 144, 132, 135, 150, - 255, 158, 159, 170, 175, 148, 151, 188, - 255, 128, 167, 176, 255, 164, 255, 183, - 255, 128, 149, 160, 167, 136, 188, 128, - 133, 138, 181, 183, 184, 191, 255, 150, - 159, 183, 255, 128, 158, 160, 178, 180, - 181, 128, 149, 160, 185, 128, 183, 190, - 191, 191, 128, 131, 133, 134, 140, 147, - 149, 151, 153, 179, 184, 186, 160, 188, - 128, 156, 128, 135, 137, 166, 128, 181, - 128, 149, 160, 178, 128, 145, 128, 178, - 129, 130, 131, 132, 133, 135, 136, 138, - 139, 140, 141, 144, 145, 146, 147, 150, - 151, 152, 153, 154, 155, 156, 162, 163, - 171, 176, 177, 178, 128, 134, 135, 165, - 176, 190, 144, 168, 176, 185, 128, 180, - 182, 191, 182, 144, 179, 155, 133, 137, - 141, 143, 157, 255, 190, 128, 145, 147, - 183, 136, 128, 134, 138, 141, 143, 157, - 159, 168, 176, 255, 171, 175, 186, 255, - 128, 131, 133, 140, 143, 144, 147, 168, + 182, 183, 185, 186, 187, 188, 189, 190, + 191, 165, 179, 157, 190, 128, 134, 147, + 151, 159, 168, 170, 182, 184, 188, 176, + 180, 182, 255, 161, 186, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 161, 169, 128, + 129, 130, 131, 133, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 153, 155, 178, 179, 145, 255, + 139, 143, 182, 255, 158, 175, 128, 144, + 147, 149, 151, 153, 179, 128, 135, 137, + 164, 128, 130, 131, 132, 133, 134, 135, + 136, 138, 139, 140, 
141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 156, 162, + 163, 171, 176, 177, 178, 131, 183, 131, + 175, 144, 168, 131, 166, 182, 144, 178, + 131, 178, 154, 156, 129, 132, 128, 145, + 147, 171, 159, 255, 144, 157, 161, 135, + 138, 128, 175, 135, 132, 133, 128, 174, + 152, 155, 132, 128, 170, 128, 153, 160, + 190, 192, 255, 128, 136, 138, 174, 128, + 178, 255, 160, 168, 169, 171, 172, 173, + 174, 188, 189, 190, 191, 161, 167, 144, + 173, 128, 131, 163, 183, 189, 255, 133, + 143, 145, 255, 147, 159, 128, 176, 177, + 178, 128, 136, 144, 153, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 150, 153, 131, 140, 255, 160, 163, + 164, 165, 184, 185, 186, 161, 162, 133, + 255, 170, 181, 183, 186, 128, 150, 152, + 182, 184, 255, 192, 255, 128, 255, 173, + 130, 133, 146, 159, 165, 171, 175, 255, + 181, 190, 184, 185, 192, 255, 140, 134, + 138, 142, 161, 163, 255, 182, 130, 136, + 137, 176, 151, 152, 154, 160, 190, 136, + 144, 192, 255, 135, 129, 130, 132, 133, + 144, 170, 176, 178, 144, 154, 160, 191, + 128, 169, 174, 255, 148, 169, 157, 158, + 189, 190, 192, 255, 144, 255, 139, 140, + 178, 255, 186, 128, 181, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 128, 173, 128, + 155, 160, 180, 182, 189, 148, 161, 163, + 255, 176, 164, 165, 132, 169, 177, 141, + 142, 145, 146, 179, 181, 186, 187, 158, + 133, 134, 137, 138, 143, 150, 152, 155, + 164, 165, 178, 255, 188, 129, 131, 133, + 138, 143, 144, 147, 168, 170, 176, 178, + 179, 181, 182, 184, 185, 190, 255, 157, + 131, 134, 137, 138, 142, 144, 146, 152, + 159, 165, 182, 255, 129, 131, 133, 141, + 143, 145, 147, 168, 170, 176, 178, 179, + 181, 185, 188, 255, 134, 138, 142, 143, + 145, 159, 164, 165, 176, 184, 186, 255, + 129, 131, 133, 140, 143, 144, 147, 168, 170, 176, 178, 179, 181, 185, 188, 191, - 144, 151, 128, 132, 135, 136, 139, 141, - 157, 163, 166, 172, 176, 180, 128, 138, - 144, 153, 134, 136, 143, 154, 255, 128, - 181, 184, 255, 129, 151, 158, 255, 129, - 131, 133, 143, 154, 255, 128, 137, 128, - 153, 157, 171, 176, 185, 160, 255, 170, - 190, 192, 255, 128, 184, 128, 136, 138, - 182, 184, 191, 128, 144, 153, 178, 255, - 168, 144, 145, 183, 255, 128, 142, 145, - 149, 129, 141, 144, 146, 147, 148, 175, - 255, 132, 255, 128, 144, 129, 143, 144, - 153, 145, 152, 135, 255, 160, 168, 169, - 171, 172, 173, 174, 188, 189, 190, 191, - 161, 167, 185, 255, 128, 158, 160, 169, - 144, 173, 176, 180, 128, 131, 144, 153, - 163, 183, 189, 255, 144, 255, 133, 143, - 191, 255, 143, 159, 160, 128, 129, 255, - 159, 160, 171, 172, 255, 173, 255, 179, - 255, 128, 176, 177, 178, 128, 129, 171, - 175, 189, 255, 128, 136, 144, 153, 157, - 158, 133, 134, 137, 144, 145, 146, 147, - 148, 149, 154, 155, 156, 157, 158, 159, - 168, 169, 170, 150, 153, 165, 169, 173, - 178, 187, 255, 131, 132, 140, 169, 174, - 255, 130, 132, 149, 157, 173, 186, 188, - 160, 161, 163, 164, 167, 168, 132, 134, - 149, 157, 186, 139, 140, 191, 255, 134, - 128, 132, 138, 144, 146, 255, 166, 167, - 129, 155, 187, 149, 181, 143, 175, 137, - 169, 131, 140, 141, 192, 255, 128, 182, - 187, 255, 173, 180, 182, 255, 132, 155, - 159, 161, 175, 128, 160, 163, 164, 165, - 184, 185, 186, 161, 162, 128, 134, 136, - 152, 155, 161, 163, 164, 166, 170, 133, - 143, 151, 255, 139, 143, 154, 255, 164, - 167, 185, 187, 128, 131, 133, 159, 161, - 162, 169, 178, 180, 183, 130, 135, 137, - 139, 148, 151, 153, 155, 157, 159, 164, - 190, 141, 143, 145, 146, 161, 162, 167, - 170, 
172, 178, 180, 183, 185, 188, 128, - 137, 139, 155, 161, 163, 165, 169, 171, - 187, 155, 156, 151, 255, 156, 157, 160, - 181, 255, 186, 187, 255, 162, 255, 160, - 168, 161, 167, 158, 255, 160, 132, 135, - 133, 134, 176, 255, 170, 181, 186, 191, - 176, 180, 182, 183, 186, 189, 134, 140, - 136, 138, 142, 161, 163, 255, 130, 137, - 136, 255, 144, 170, 176, 178, 160, 191, - 128, 138, 174, 175, 177, 255, 148, 150, - 164, 167, 173, 176, 185, 189, 190, 192, - 255, 144, 146, 175, 141, 255, 166, 176, - 178, 255, 186, 138, 170, 180, 181, 160, - 161, 162, 164, 165, 166, 167, 168, 169, - 170, 171, 172, 173, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 184, 186, 187, - 188, 189, 190, 183, 185, 154, 164, 168, - 128, 149, 128, 152, 189, 132, 185, 144, - 152, 161, 177, 255, 169, 177, 129, 132, - 141, 142, 145, 146, 179, 181, 186, 188, - 190, 255, 142, 156, 157, 159, 161, 176, - 177, 133, 138, 143, 144, 147, 168, 170, - 176, 178, 179, 181, 182, 184, 185, 158, - 153, 156, 178, 180, 189, 133, 141, 143, - 145, 147, 168, 170, 176, 178, 179, 181, - 185, 144, 185, 160, 161, 189, 133, 140, - 143, 144, 147, 168, 170, 176, 178, 179, - 181, 185, 177, 156, 157, 159, 161, 131, - 156, 133, 138, 142, 144, 146, 149, 153, - 154, 158, 159, 163, 164, 168, 170, 174, - 185, 144, 189, 133, 140, 142, 144, 146, - 168, 170, 185, 152, 154, 160, 161, 128, - 189, 133, 140, 142, 144, 146, 168, 170, - 179, 181, 185, 158, 160, 161, 177, 178, - 189, 133, 140, 142, 144, 146, 186, 142, - 148, 150, 159, 161, 186, 191, 189, 133, - 150, 154, 177, 179, 187, 128, 134, 129, - 176, 178, 179, 132, 138, 141, 165, 167, - 189, 129, 130, 135, 136, 148, 151, 153, - 159, 161, 163, 170, 171, 173, 176, 178, - 179, 134, 128, 132, 156, 159, 128, 128, - 135, 137, 172, 136, 140, 128, 129, 130, - 131, 137, 138, 139, 140, 141, 142, 143, - 144, 153, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 172, 173, 174, 175, 176, - 177, 178, 179, 180, 181, 182, 184, 188, - 189, 190, 191, 132, 152, 185, 187, 191, - 128, 170, 161, 144, 149, 154, 157, 165, - 166, 174, 176, 181, 255, 130, 141, 143, - 159, 155, 255, 128, 140, 142, 145, 160, - 177, 128, 145, 160, 172, 174, 176, 151, - 156, 170, 128, 168, 176, 255, 138, 255, - 128, 150, 160, 255, 149, 255, 167, 133, - 179, 133, 139, 131, 160, 174, 175, 186, - 255, 166, 255, 128, 163, 141, 143, 154, - 189, 169, 172, 174, 177, 181, 182, 129, - 130, 132, 133, 134, 176, 177, 178, 179, - 180, 181, 182, 183, 177, 191, 165, 170, - 175, 177, 180, 255, 168, 174, 176, 255, - 128, 134, 136, 142, 144, 150, 152, 158, - 128, 129, 130, 131, 132, 133, 134, 135, - 144, 145, 255, 133, 135, 161, 169, 177, - 181, 184, 188, 160, 151, 154, 128, 146, + 177, 128, 132, 135, 136, 139, 141, 150, + 151, 156, 157, 159, 163, 166, 175, 156, + 130, 131, 133, 138, 142, 144, 146, 149, + 153, 154, 158, 159, 163, 164, 168, 170, + 174, 185, 190, 191, 144, 151, 128, 130, + 134, 136, 138, 141, 166, 175, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 185, + 189, 255, 133, 137, 151, 142, 148, 155, + 159, 164, 165, 176, 255, 128, 131, 133, + 140, 142, 144, 146, 168, 170, 179, 181, + 185, 188, 191, 158, 128, 132, 134, 136, + 138, 141, 149, 150, 160, 163, 166, 175, + 177, 178, 129, 131, 133, 140, 142, 144, + 146, 186, 189, 255, 133, 137, 143, 147, + 152, 158, 164, 165, 176, 185, 192, 255, + 189, 130, 131, 133, 150, 154, 177, 179, + 187, 138, 150, 128, 134, 143, 148, 152, + 159, 166, 175, 178, 179, 129, 186, 128, + 142, 144, 153, 132, 138, 141, 165, 167, + 129, 130, 135, 136, 148, 151, 153, 159, + 161, 163, 170, 171, 173, 185, 
187, 189, + 134, 128, 132, 136, 141, 144, 153, 156, + 159, 128, 181, 183, 185, 152, 153, 160, + 169, 190, 191, 128, 135, 137, 172, 177, + 191, 128, 132, 134, 151, 153, 188, 134, + 128, 129, 130, 131, 137, 138, 139, 140, + 141, 142, 143, 144, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 173, 175, + 176, 177, 178, 179, 181, 182, 183, 188, + 189, 190, 191, 132, 152, 172, 184, 185, + 187, 128, 191, 128, 137, 144, 255, 158, + 159, 134, 187, 136, 140, 142, 143, 137, + 151, 153, 142, 143, 158, 159, 137, 177, + 142, 143, 182, 183, 191, 255, 128, 130, + 133, 136, 150, 152, 255, 145, 150, 151, + 155, 156, 160, 168, 178, 255, 128, 143, + 160, 255, 182, 183, 190, 255, 129, 255, + 173, 174, 192, 255, 129, 154, 160, 255, + 171, 173, 185, 255, 128, 140, 142, 148, + 160, 180, 128, 147, 160, 172, 174, 176, + 178, 179, 148, 150, 152, 155, 158, 159, + 170, 255, 139, 141, 144, 153, 160, 255, + 184, 255, 128, 170, 176, 255, 182, 255, + 128, 158, 160, 171, 176, 187, 134, 173, + 176, 180, 128, 171, 176, 255, 138, 143, + 155, 255, 128, 155, 160, 255, 159, 189, + 190, 192, 255, 167, 128, 137, 144, 153, + 176, 189, 140, 143, 154, 170, 180, 255, + 180, 255, 128, 183, 128, 137, 141, 189, + 128, 136, 144, 146, 148, 182, 184, 185, + 128, 181, 187, 191, 150, 151, 158, 159, + 152, 154, 156, 158, 134, 135, 142, 143, + 190, 255, 190, 128, 180, 182, 188, 130, + 132, 134, 140, 144, 147, 150, 155, 160, + 172, 178, 180, 182, 188, 128, 129, 130, + 131, 132, 133, 134, 176, 177, 178, 179, + 180, 181, 182, 183, 191, 255, 129, 147, + 149, 176, 178, 190, 192, 255, 144, 156, + 161, 144, 156, 165, 176, 130, 135, 149, + 164, 166, 168, 138, 147, 152, 157, 170, + 185, 188, 191, 142, 133, 137, 160, 255, + 137, 255, 128, 174, 176, 255, 159, 165, + 170, 180, 255, 167, 173, 128, 165, 176, + 255, 168, 174, 176, 190, 192, 255, 128, + 150, 160, 166, 168, 174, 176, 182, 184, + 190, 128, 134, 136, 142, 144, 150, 152, + 158, 160, 191, 128, 129, 130, 131, 132, + 133, 134, 135, 144, 145, 255, 133, 135, + 161, 175, 177, 181, 184, 188, 160, 151, + 152, 187, 192, 255, 133, 173, 177, 255, + 143, 159, 187, 255, 176, 191, 182, 183, + 184, 191, 192, 255, 150, 255, 128, 146, 147, 148, 152, 153, 154, 155, 156, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, - 175, 176, 129, 255, 141, 143, 160, 169, - 172, 255, 191, 128, 174, 130, 134, 139, - 163, 255, 130, 179, 187, 189, 178, 183, - 138, 165, 176, 255, 135, 159, 189, 255, - 132, 178, 143, 160, 164, 166, 175, 186, - 190, 128, 168, 186, 128, 130, 132, 139, - 160, 182, 190, 255, 176, 178, 180, 183, - 184, 190, 255, 128, 130, 155, 157, 160, - 170, 178, 180, 128, 162, 164, 169, 171, - 172, 173, 174, 175, 180, 181, 182, 183, - 185, 186, 187, 188, 189, 190, 191, 165, - 179, 157, 190, 128, 134, 147, 151, 159, - 168, 170, 182, 184, 188, 176, 180, 182, - 255, 161, 186, 144, 145, 146, 147, 148, - 150, 151, 152, 155, 157, 158, 160, 170, - 171, 172, 175, 161, 169, 128, 129, 130, - 131, 133, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 152, 156, - 157, 160, 161, 162, 163, 164, 166, 168, - 169, 170, 171, 172, 173, 174, 176, 177, - 153, 155, 178, 179, 145, 255, 139, 143, - 182, 255, 158, 175, 128, 144, 147, 149, - 151, 153, 179, 128, 135, 137, 164, 128, + 175, 176, 129, 255, 141, 255, 144, 189, + 141, 143, 172, 255, 191, 128, 175, 180, + 189, 151, 159, 162, 255, 175, 137, 138, + 184, 255, 183, 255, 168, 255, 128, 179, + 188, 134, 143, 154, 159, 184, 186, 190, + 255, 128, 173, 176, 255, 148, 159, 189, + 255, 129, 142, 154, 
159, 191, 255, 128, + 182, 128, 141, 144, 153, 160, 182, 186, + 255, 128, 130, 155, 157, 160, 175, 178, + 182, 129, 134, 137, 142, 145, 150, 160, + 166, 168, 174, 176, 255, 155, 166, 175, + 128, 170, 172, 173, 176, 185, 158, 159, + 160, 255, 164, 175, 135, 138, 188, 255, + 164, 169, 171, 172, 173, 174, 175, 180, + 181, 182, 183, 184, 185, 187, 188, 189, + 190, 191, 165, 186, 174, 175, 154, 255, + 190, 128, 134, 147, 151, 157, 168, 170, + 182, 184, 188, 128, 129, 131, 132, 134, + 255, 147, 255, 190, 255, 144, 145, 136, + 175, 188, 255, 128, 143, 160, 175, 179, + 180, 141, 143, 176, 180, 182, 255, 189, + 255, 191, 144, 153, 161, 186, 129, 154, + 166, 255, 191, 255, 130, 135, 138, 143, + 146, 151, 154, 156, 144, 145, 146, 147, + 148, 150, 151, 152, 155, 157, 158, 160, + 170, 171, 172, 175, 161, 169, 128, 129, + 130, 131, 133, 135, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 153, 155, 178, 179, 128, 139, + 141, 166, 168, 186, 188, 189, 191, 255, + 142, 143, 158, 255, 187, 255, 128, 180, + 189, 128, 156, 160, 255, 145, 159, 161, + 255, 128, 159, 176, 255, 139, 143, 187, + 255, 128, 157, 160, 255, 144, 132, 135, + 150, 255, 158, 159, 170, 175, 148, 151, + 188, 255, 128, 167, 176, 255, 164, 255, + 183, 255, 128, 149, 160, 167, 136, 188, + 128, 133, 138, 181, 183, 184, 191, 255, + 150, 159, 183, 255, 128, 158, 160, 178, + 180, 181, 128, 149, 160, 185, 128, 183, + 190, 191, 191, 128, 131, 133, 134, 140, + 147, 149, 151, 153, 179, 184, 186, 160, + 188, 128, 156, 128, 135, 137, 166, 128, + 181, 128, 149, 160, 178, 128, 145, 128, + 178, 129, 130, 131, 132, 133, 135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 155, 156, 162, + 163, 171, 176, 177, 178, 128, 134, 135, + 165, 176, 190, 144, 168, 176, 185, 128, + 180, 182, 191, 182, 144, 179, 155, 133, + 137, 141, 143, 157, 255, 190, 128, 145, + 147, 183, 136, 128, 134, 138, 141, 143, + 157, 159, 168, 176, 255, 171, 175, 186, + 255, 128, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 144, 151, 128, 132, 135, 136, 139, + 141, 157, 163, 166, 172, 176, 180, 128, + 138, 144, 153, 134, 136, 143, 154, 255, + 128, 181, 184, 255, 129, 151, 158, 255, + 129, 131, 133, 143, 154, 255, 128, 137, + 128, 153, 157, 171, 176, 185, 160, 255, + 170, 190, 192, 255, 128, 184, 128, 136, + 138, 182, 184, 191, 128, 144, 153, 178, + 255, 168, 144, 145, 183, 255, 128, 142, + 145, 149, 129, 141, 144, 146, 147, 148, + 175, 255, 132, 255, 128, 144, 129, 143, + 144, 153, 145, 152, 135, 255, 160, 168, + 169, 171, 172, 173, 174, 188, 189, 190, + 191, 161, 167, 185, 255, 128, 158, 160, + 169, 144, 173, 176, 180, 128, 131, 144, + 153, 163, 183, 189, 255, 144, 255, 133, + 143, 191, 255, 143, 159, 160, 128, 129, + 255, 159, 160, 171, 172, 255, 173, 255, + 179, 255, 128, 176, 177, 178, 128, 129, + 171, 175, 189, 255, 128, 136, 144, 153, + 157, 158, 133, 134, 137, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 168, 169, 170, 150, 153, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 149, 157, 173, 186, + 188, 160, 161, 163, 164, 167, 168, 132, + 134, 149, 157, 186, 139, 140, 191, 255, + 134, 128, 132, 138, 144, 146, 255, 166, + 167, 129, 155, 187, 149, 181, 143, 175, + 137, 169, 131, 140, 141, 192, 255, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 160, 163, 164, + 165, 184, 185, 186, 161, 162, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 133, 
143, 151, 255, 139, 143, 154, 255, + 164, 167, 185, 187, 128, 131, 133, 159, + 161, 162, 169, 178, 180, 183, 130, 135, + 137, 139, 148, 151, 153, 155, 157, 159, + 164, 190, 141, 143, 145, 146, 161, 162, + 167, 170, 172, 178, 180, 183, 185, 188, + 128, 137, 139, 155, 161, 163, 165, 169, + 171, 187, 155, 156, 151, 255, 156, 157, + 160, 181, 255, 186, 187, 255, 162, 255, + 160, 168, 161, 167, 158, 255, 160, 132, + 135, 133, 134, 176, 255, 128, 191, 154, + 164, 168, 128, 149, 150, 191, 128, 152, + 153, 191, 181, 128, 159, 160, 189, 190, + 191, 189, 128, 131, 132, 185, 186, 191, + 144, 128, 151, 152, 161, 162, 176, 177, + 255, 169, 177, 129, 132, 141, 142, 145, + 146, 179, 181, 186, 188, 190, 191, 192, + 255, 142, 158, 128, 155, 156, 161, 162, + 175, 176, 177, 178, 191, 169, 177, 180, + 183, 128, 132, 133, 138, 139, 142, 143, + 144, 145, 146, 147, 185, 186, 191, 157, + 128, 152, 153, 158, 159, 177, 178, 180, + 181, 191, 142, 146, 169, 177, 180, 189, + 128, 132, 133, 185, 186, 191, 144, 185, + 128, 159, 160, 161, 162, 191, 169, 177, + 180, 189, 128, 132, 133, 140, 141, 142, + 143, 144, 145, 146, 147, 185, 186, 191, + 158, 177, 128, 155, 156, 161, 162, 191, + 131, 145, 155, 157, 128, 132, 133, 138, + 139, 141, 142, 149, 150, 152, 153, 159, + 160, 162, 163, 164, 165, 167, 168, 170, + 171, 173, 174, 185, 186, 191, 144, 128, + 191, 141, 145, 169, 189, 128, 132, 133, + 185, 186, 191, 128, 151, 152, 154, 155, + 159, 160, 161, 162, 191, 128, 141, 145, + 169, 180, 189, 129, 132, 133, 185, 186, + 191, 158, 128, 159, 160, 161, 162, 176, + 177, 178, 179, 191, 141, 145, 189, 128, + 132, 133, 186, 187, 191, 142, 128, 147, + 148, 150, 151, 158, 159, 161, 162, 185, + 186, 191, 178, 188, 128, 132, 133, 150, + 151, 153, 154, 189, 190, 191, 128, 134, + 135, 191, 128, 177, 129, 179, 180, 191, + 128, 131, 137, 141, 152, 160, 164, 166, + 172, 177, 189, 129, 132, 133, 134, 135, + 138, 139, 147, 148, 167, 168, 169, 170, + 179, 180, 191, 133, 128, 134, 135, 155, + 156, 159, 160, 191, 128, 129, 191, 136, + 128, 172, 173, 191, 128, 135, 136, 140, + 141, 191, 191, 128, 170, 171, 190, 161, + 128, 143, 144, 149, 150, 153, 154, 157, + 158, 164, 165, 166, 167, 173, 174, 176, + 177, 180, 181, 255, 130, 141, 143, 159, + 134, 187, 136, 140, 142, 143, 137, 151, + 153, 142, 143, 158, 159, 137, 177, 191, + 142, 143, 182, 183, 192, 255, 129, 151, + 128, 133, 134, 135, 136, 255, 145, 150, + 151, 155, 191, 192, 255, 128, 143, 144, + 159, 160, 255, 182, 183, 190, 191, 192, + 255, 128, 129, 255, 173, 174, 192, 255, + 128, 129, 154, 155, 159, 160, 255, 171, + 173, 185, 191, 192, 255, 141, 128, 145, + 146, 159, 160, 177, 178, 191, 173, 128, + 145, 146, 159, 160, 176, 177, 191, 128, + 179, 180, 191, 151, 156, 128, 191, 128, + 159, 160, 255, 184, 191, 192, 255, 169, + 128, 170, 171, 175, 176, 255, 182, 191, + 192, 255, 128, 158, 159, 191, 128, 143, + 144, 173, 174, 175, 176, 180, 181, 191, + 128, 171, 172, 175, 176, 255, 138, 191, + 192, 255, 128, 150, 151, 159, 160, 255, + 149, 191, 192, 255, 167, 128, 191, 128, + 132, 133, 179, 180, 191, 128, 132, 133, + 139, 140, 191, 128, 130, 131, 160, 161, + 173, 174, 175, 176, 185, 186, 255, 166, + 191, 192, 255, 128, 163, 164, 191, 128, + 140, 141, 143, 144, 153, 154, 189, 190, + 191, 128, 136, 137, 191, 173, 128, 168, + 169, 177, 178, 180, 181, 182, 183, 191, + 128, 255, 192, 255, 150, 151, 158, 159, + 152, 154, 156, 158, 134, 135, 142, 143, + 190, 191, 192, 255, 181, 189, 191, 128, + 190, 133, 181, 128, 129, 130, 140, 141, + 143, 144, 147, 148, 149, 150, 155, 156, + 159, 160, 172, 173, 177, 178, 
188, 189, + 191, 177, 191, 128, 190, 128, 143, 144, + 156, 157, 191, 130, 135, 148, 164, 166, + 168, 128, 137, 138, 149, 150, 151, 152, + 157, 158, 169, 170, 185, 186, 187, 188, + 191, 142, 128, 132, 133, 137, 138, 159, + 160, 255, 137, 191, 192, 255, 175, 128, + 255, 159, 165, 170, 175, 177, 180, 191, + 192, 255, 166, 173, 128, 167, 168, 175, + 176, 255, 168, 174, 176, 191, 192, 255, + 167, 175, 183, 191, 128, 150, 151, 159, + 160, 190, 135, 143, 151, 128, 158, 159, + 191, 128, 132, 133, 135, 136, 160, 161, + 169, 170, 176, 177, 181, 182, 183, 184, + 188, 189, 191, 160, 151, 154, 187, 192, + 255, 128, 132, 133, 173, 174, 176, 177, + 255, 143, 159, 187, 191, 192, 255, 128, + 175, 176, 191, 150, 191, 192, 255, 141, + 191, 192, 255, 128, 143, 144, 189, 190, + 191, 141, 143, 160, 169, 172, 191, 192, + 255, 191, 128, 174, 175, 190, 128, 157, + 158, 159, 160, 255, 176, 191, 192, 255, + 128, 150, 151, 159, 160, 161, 162, 255, + 175, 137, 138, 184, 191, 192, 255, 128, + 182, 183, 255, 130, 134, 139, 163, 191, + 192, 255, 128, 129, 130, 179, 180, 191, + 187, 189, 128, 177, 178, 183, 184, 191, + 128, 137, 138, 165, 166, 175, 176, 255, + 135, 159, 189, 191, 192, 255, 128, 131, + 132, 178, 179, 191, 143, 165, 191, 128, + 159, 160, 175, 176, 185, 186, 190, 128, + 168, 169, 191, 131, 186, 128, 139, 140, + 159, 160, 182, 183, 189, 190, 255, 176, + 178, 180, 183, 184, 190, 191, 192, 255, + 129, 128, 130, 131, 154, 155, 157, 158, + 159, 160, 170, 171, 177, 178, 180, 181, + 191, 128, 167, 175, 129, 134, 135, 136, + 137, 142, 143, 144, 145, 150, 151, 159, + 160, 255, 155, 166, 175, 128, 162, 163, + 191, 164, 175, 135, 138, 188, 191, 192, + 255, 174, 175, 154, 191, 192, 255, 157, + 169, 183, 189, 191, 128, 134, 135, 146, + 147, 151, 152, 158, 159, 190, 130, 133, + 128, 255, 178, 191, 192, 255, 128, 146, + 147, 255, 190, 191, 192, 255, 128, 143, + 144, 255, 144, 145, 136, 175, 188, 191, + 192, 255, 181, 128, 175, 176, 255, 189, + 191, 192, 255, 128, 160, 161, 186, 187, + 191, 128, 129, 154, 155, 165, 166, 255, + 191, 192, 255, 128, 129, 130, 135, 136, + 137, 138, 143, 144, 145, 146, 151, 152, + 153, 154, 156, 157, 191, 128, 191, 128, + 129, 130, 131, 133, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 132, 151, 153, 155, 158, 175, + 178, 179, 180, 191, 140, 167, 187, 190, + 128, 255, 142, 143, 158, 191, 192, 255, + 187, 191, 192, 255, 128, 180, 181, 191, + 128, 156, 157, 159, 160, 255, 145, 191, + 192, 255, 128, 159, 160, 175, 176, 255, + 139, 143, 182, 191, 192, 255, 144, 132, + 135, 150, 191, 192, 255, 158, 175, 148, + 151, 188, 191, 192, 255, 128, 167, 168, + 175, 176, 255, 164, 191, 192, 255, 183, + 191, 192, 255, 128, 149, 150, 159, 160, + 167, 168, 191, 136, 182, 188, 128, 133, + 134, 137, 138, 184, 185, 190, 191, 255, + 150, 159, 183, 191, 192, 255, 179, 128, + 159, 160, 181, 182, 191, 128, 149, 150, + 159, 160, 185, 186, 191, 128, 183, 184, + 189, 190, 191, 128, 148, 152, 129, 143, + 144, 179, 180, 191, 128, 159, 160, 188, + 189, 191, 128, 156, 157, 191, 136, 128, + 164, 165, 191, 128, 181, 182, 191, 128, + 149, 150, 159, 160, 178, 179, 191, 128, + 145, 146, 191, 128, 178, 179, 191, 128, 130, 131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 144, 145, 146, 147, 150, 151, 152, 153, 154, 156, 162, 163, 171, - 176, 177, 178, 131, 183, 131, 175, 144, - 168, 131, 166, 182, 144, 178, 131, 178, - 154, 156, 129, 132, 128, 145, 147, 171, - 159, 255, 144, 157, 161, 135, 138, 128, - 175, 135, 132, 133, 
128, 174, 152, 155, - 132, 128, 170, 128, 153, 160, 190, 192, - 255, 128, 136, 138, 174, 128, 178, 255, - 160, 168, 169, 171, 172, 173, 174, 188, - 189, 190, 191, 161, 167, 144, 173, 128, - 131, 163, 183, 189, 255, 133, 143, 145, - 255, 147, 159, 128, 176, 177, 178, 128, - 136, 144, 153, 144, 145, 146, 147, 148, - 149, 154, 155, 156, 157, 158, 159, 150, - 153, 131, 140, 255, 160, 163, 164, 165, - 184, 185, 186, 161, 162, 133, 255, 170, - 181, 183, 186, 128, 150, 152, 182, 184, - 255, 192, 255, 128, 255, 173, 130, 133, - 146, 159, 165, 171, 175, 255, 181, 190, - 184, 185, 192, 255, 140, 134, 138, 142, - 161, 163, 255, 182, 130, 136, 137, 176, - 151, 152, 154, 160, 190, 136, 144, 192, - 255, 135, 129, 130, 132, 133, 144, 170, - 176, 178, 144, 154, 160, 191, 128, 169, - 174, 255, 148, 169, 157, 158, 189, 190, - 192, 255, 144, 255, 139, 140, 178, 255, - 186, 128, 181, 160, 161, 162, 163, 164, - 165, 166, 167, 168, 169, 170, 171, 172, - 173, 174, 175, 176, 177, 178, 179, 180, - 181, 182, 183, 184, 185, 186, 187, 188, - 189, 190, 191, 128, 173, 128, 155, 160, - 180, 182, 189, 148, 161, 163, 255, 176, - 164, 165, 132, 169, 177, 141, 142, 145, - 146, 179, 181, 186, 187, 158, 133, 134, - 137, 138, 143, 150, 152, 155, 164, 165, - 178, 255, 188, 129, 131, 133, 138, 143, - 144, 147, 168, 170, 176, 178, 179, 181, - 182, 184, 185, 190, 255, 157, 131, 134, - 137, 138, 142, 144, 146, 152, 159, 165, - 182, 255, 129, 131, 133, 141, 143, 145, - 147, 168, 170, 176, 178, 179, 181, 185, - 188, 255, 134, 138, 142, 143, 145, 159, - 164, 165, 176, 184, 186, 255, 129, 131, - 133, 140, 143, 144, 147, 168, 170, 176, - 178, 179, 181, 185, 188, 191, 177, 128, - 132, 135, 136, 139, 141, 150, 151, 156, - 157, 159, 163, 166, 175, 156, 130, 131, - 133, 138, 142, 144, 146, 149, 153, 154, - 158, 159, 163, 164, 168, 170, 174, 185, - 190, 191, 144, 151, 128, 130, 134, 136, - 138, 141, 166, 175, 128, 131, 133, 140, - 142, 144, 146, 168, 170, 185, 189, 255, - 133, 137, 151, 142, 148, 155, 159, 164, - 165, 176, 255, 128, 131, 133, 140, 142, - 144, 146, 168, 170, 179, 181, 185, 188, - 191, 158, 128, 132, 134, 136, 138, 141, - 149, 150, 160, 163, 166, 175, 177, 178, - 129, 131, 133, 140, 142, 144, 146, 186, - 189, 255, 133, 137, 143, 147, 152, 158, - 164, 165, 176, 185, 192, 255, 189, 130, - 131, 133, 150, 154, 177, 179, 187, 138, - 150, 128, 134, 143, 148, 152, 159, 166, - 175, 178, 179, 129, 186, 128, 142, 144, - 153, 132, 138, 141, 165, 167, 129, 130, - 135, 136, 148, 151, 153, 159, 161, 163, - 170, 171, 173, 185, 187, 189, 134, 128, - 132, 136, 141, 144, 153, 156, 159, 128, - 181, 183, 185, 152, 153, 160, 169, 190, - 191, 128, 135, 137, 172, 177, 191, 128, - 132, 134, 151, 153, 188, 134, 128, 129, - 130, 131, 137, 138, 139, 140, 141, 142, - 143, 144, 153, 154, 155, 156, 157, 158, - 159, 160, 161, 162, 163, 164, 165, 166, - 167, 168, 169, 170, 173, 175, 176, 177, - 178, 179, 181, 182, 183, 188, 189, 190, - 191, 132, 152, 172, 184, 185, 187, 128, - 191, 128, 137, 144, 255, 158, 159, 134, - 187, 136, 140, 142, 143, 137, 151, 153, - 142, 143, 158, 159, 137, 177, 142, 143, - 182, 183, 191, 255, 128, 130, 133, 136, - 150, 152, 255, 145, 150, 151, 155, 156, - 160, 168, 178, 255, 128, 143, 160, 255, - 182, 183, 190, 255, 129, 255, 173, 174, - 192, 255, 129, 154, 160, 255, 171, 173, - 185, 255, 128, 140, 142, 148, 160, 180, - 128, 147, 160, 172, 174, 176, 178, 179, - 148, 150, 152, 155, 158, 159, 170, 255, - 139, 141, 144, 153, 160, 255, 184, 255, - 128, 170, 176, 255, 182, 255, 128, 158, - 160, 171, 176, 187, 134, 173, 176, 180, - 128, 
171, 176, 255, 138, 143, 155, 255, - 128, 155, 160, 255, 159, 189, 190, 192, - 255, 167, 128, 137, 144, 153, 176, 189, - 140, 143, 154, 170, 180, 255, 180, 255, - 128, 183, 128, 137, 141, 189, 128, 136, - 144, 146, 148, 182, 184, 185, 128, 181, - 187, 191, 150, 151, 158, 159, 152, 154, - 156, 158, 134, 135, 142, 143, 190, 255, - 190, 128, 180, 182, 188, 130, 132, 134, - 140, 144, 147, 150, 155, 160, 172, 178, - 180, 182, 188, 128, 129, 130, 131, 132, - 133, 134, 176, 177, 178, 179, 180, 181, - 182, 183, 191, 255, 129, 147, 149, 176, - 178, 190, 192, 255, 144, 156, 161, 144, - 156, 165, 176, 130, 135, 149, 164, 166, - 168, 138, 147, 152, 157, 170, 185, 188, - 191, 142, 133, 137, 160, 255, 137, 255, - 128, 174, 176, 255, 159, 165, 170, 180, - 255, 167, 173, 128, 165, 176, 255, 168, - 174, 176, 190, 192, 255, 128, 150, 160, - 166, 168, 174, 176, 182, 184, 190, 128, - 134, 136, 142, 144, 150, 152, 158, 160, - 191, 128, 129, 130, 131, 132, 133, 134, - 135, 144, 145, 255, 133, 135, 161, 175, - 177, 181, 184, 188, 160, 151, 152, 187, - 192, 255, 133, 173, 177, 255, 143, 159, - 187, 255, 176, 191, 182, 183, 184, 191, - 192, 255, 150, 255, 128, 146, 147, 148, - 152, 153, 154, 155, 156, 158, 159, 160, - 161, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 172, 173, 174, 175, 176, - 129, 255, 141, 255, 144, 189, 141, 143, - 172, 255, 191, 128, 175, 180, 189, 151, - 159, 162, 255, 175, 137, 138, 184, 255, - 183, 255, 168, 255, 128, 179, 188, 134, - 143, 154, 159, 184, 186, 190, 255, 128, - 173, 176, 255, 148, 159, 189, 255, 129, - 142, 154, 159, 191, 255, 128, 182, 128, - 141, 144, 153, 160, 182, 186, 255, 128, - 130, 155, 157, 160, 175, 178, 182, 129, - 134, 137, 142, 145, 150, 160, 166, 168, - 174, 176, 255, 155, 166, 175, 128, 170, - 172, 173, 176, 185, 158, 159, 160, 255, - 164, 175, 135, 138, 188, 255, 164, 169, - 171, 172, 173, 174, 175, 180, 181, 182, - 183, 184, 185, 187, 188, 189, 190, 191, - 165, 186, 174, 175, 154, 255, 190, 128, - 134, 147, 151, 157, 168, 170, 182, 184, - 188, 128, 129, 131, 132, 134, 255, 147, - 255, 190, 255, 144, 145, 136, 175, 188, - 255, 128, 143, 160, 175, 179, 180, 141, - 143, 176, 180, 182, 255, 189, 255, 191, - 144, 153, 161, 186, 129, 154, 166, 255, - 191, 255, 130, 135, 138, 143, 146, 151, - 154, 156, 144, 145, 146, 147, 148, 150, - 151, 152, 155, 157, 158, 160, 170, 171, - 172, 175, 161, 169, 128, 129, 130, 131, - 133, 135, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 152, 156, - 157, 160, 161, 162, 163, 164, 166, 168, - 169, 170, 171, 172, 173, 174, 176, 177, - 153, 155, 178, 179, 128, 139, 141, 166, - 168, 186, 188, 189, 191, 255, 142, 143, - 158, 255, 187, 255, 128, 180, 189, 128, - 156, 160, 255, 145, 159, 161, 255, 128, - 159, 176, 255, 139, 143, 187, 255, 128, - 157, 160, 255, 144, 132, 135, 150, 255, - 158, 159, 170, 175, 148, 151, 188, 255, - 128, 167, 176, 255, 164, 255, 183, 255, - 128, 149, 160, 167, 136, 188, 128, 133, - 138, 181, 183, 184, 191, 255, 150, 159, - 183, 255, 128, 158, 160, 178, 180, 181, - 128, 149, 160, 185, 128, 183, 190, 191, - 191, 128, 131, 133, 134, 140, 147, 149, - 151, 153, 179, 184, 186, 160, 188, 128, - 156, 128, 135, 137, 166, 128, 181, 128, - 149, 160, 178, 128, 145, 128, 178, 129, - 130, 131, 132, 133, 135, 136, 138, 139, - 140, 141, 144, 145, 146, 147, 150, 151, - 152, 153, 154, 155, 156, 162, 163, 171, - 176, 177, 178, 128, 134, 135, 165, 176, - 190, 144, 168, 176, 185, 128, 180, 182, - 191, 182, 144, 179, 155, 133, 137, 141, - 143, 157, 255, 190, 128, 145, 147, 183, - 136, 128, 134, 138, 141, 143, 
157, 159, - 168, 176, 255, 171, 175, 186, 255, 128, - 131, 133, 140, 143, 144, 147, 168, 170, - 176, 178, 179, 181, 185, 188, 191, 144, - 151, 128, 132, 135, 136, 139, 141, 157, - 163, 166, 172, 176, 180, 128, 138, 144, - 153, 134, 136, 143, 154, 255, 128, 181, - 184, 255, 129, 151, 158, 255, 129, 131, - 133, 143, 154, 255, 128, 137, 128, 153, - 157, 171, 176, 185, 160, 255, 170, 190, - 192, 255, 128, 184, 128, 136, 138, 182, - 184, 191, 128, 144, 153, 178, 255, 168, - 144, 145, 183, 255, 128, 142, 145, 149, - 129, 141, 144, 146, 147, 148, 175, 255, - 132, 255, 128, 144, 129, 143, 144, 153, - 145, 152, 135, 255, 160, 168, 169, 171, - 172, 173, 174, 188, 189, 190, 191, 161, - 167, 185, 255, 128, 158, 160, 169, 144, - 173, 176, 180, 128, 131, 144, 153, 163, - 183, 189, 255, 144, 255, 133, 143, 191, - 255, 143, 159, 160, 128, 129, 255, 159, - 160, 171, 172, 255, 173, 255, 179, 255, - 128, 176, 177, 178, 128, 129, 171, 175, - 189, 255, 128, 136, 144, 153, 157, 158, - 133, 134, 137, 144, 145, 146, 147, 148, - 149, 154, 155, 156, 157, 158, 159, 168, - 169, 170, 150, 153, 165, 169, 173, 178, - 187, 255, 131, 132, 140, 169, 174, 255, - 130, 132, 149, 157, 173, 186, 188, 160, - 161, 163, 164, 167, 168, 132, 134, 149, - 157, 186, 139, 140, 191, 255, 134, 128, - 132, 138, 144, 146, 255, 166, 167, 129, + 176, 177, 178, 129, 191, 128, 130, 131, + 183, 184, 191, 128, 130, 131, 175, 176, + 191, 128, 143, 144, 168, 169, 191, 128, + 130, 131, 166, 167, 191, 182, 128, 143, + 144, 178, 179, 191, 128, 130, 131, 178, + 179, 191, 128, 154, 156, 129, 132, 133, + 191, 146, 128, 171, 172, 191, 135, 137, + 142, 158, 128, 168, 169, 175, 176, 255, + 159, 191, 192, 255, 144, 128, 156, 157, + 161, 162, 191, 128, 134, 135, 138, 139, + 191, 128, 175, 176, 191, 134, 128, 131, + 132, 135, 136, 191, 128, 174, 175, 191, + 128, 151, 152, 155, 156, 191, 132, 128, + 191, 128, 170, 171, 191, 128, 153, 154, + 191, 160, 190, 192, 255, 128, 184, 185, + 191, 137, 128, 174, 175, 191, 128, 129, + 177, 178, 255, 144, 191, 192, 255, 128, + 142, 143, 144, 145, 146, 149, 129, 148, + 150, 191, 175, 191, 192, 255, 132, 191, + 192, 255, 128, 144, 129, 143, 145, 191, + 144, 153, 128, 143, 145, 152, 154, 191, + 135, 191, 192, 255, 160, 168, 169, 171, + 172, 173, 174, 188, 189, 190, 191, 128, + 159, 161, 167, 170, 187, 185, 191, 192, + 255, 128, 143, 144, 173, 174, 191, 128, + 131, 132, 162, 163, 183, 184, 188, 189, + 255, 133, 143, 145, 191, 192, 255, 128, + 146, 147, 159, 160, 191, 160, 128, 191, + 128, 129, 191, 192, 255, 159, 160, 171, + 128, 170, 172, 191, 192, 255, 173, 191, + 192, 255, 179, 191, 192, 255, 128, 176, + 177, 178, 129, 191, 128, 129, 130, 191, + 171, 175, 189, 191, 192, 255, 128, 136, + 137, 143, 144, 153, 154, 191, 144, 145, + 146, 147, 148, 149, 154, 155, 156, 157, + 158, 159, 128, 143, 150, 153, 160, 191, + 149, 157, 173, 186, 188, 160, 161, 163, + 164, 167, 168, 132, 134, 149, 157, 186, + 191, 139, 140, 192, 255, 133, 145, 128, + 134, 135, 137, 138, 255, 166, 167, 129, 155, 187, 149, 181, 143, 175, 137, 169, - 131, 140, 141, 192, 255, 128, 182, 187, - 255, 173, 180, 182, 255, 132, 155, 159, - 161, 175, 128, 160, 163, 164, 165, 184, - 185, 186, 161, 162, 128, 134, 136, 152, - 155, 161, 163, 164, 166, 170, 133, 143, - 151, 255, 139, 143, 154, 255, 164, 167, - 185, 187, 128, 131, 133, 159, 161, 162, - 169, 178, 180, 183, 130, 135, 137, 139, - 148, 151, 153, 155, 157, 159, 164, 190, - 141, 143, 145, 146, 161, 162, 167, 170, - 172, 178, 180, 183, 185, 188, 128, 137, - 139, 155, 161, 163, 165, 169, 171, 187, - 155, 156, 151, 
255, 156, 157, 160, 181, - 255, 186, 187, 255, 162, 255, 160, 168, - 161, 167, 158, 255, 160, 132, 135, 133, - 134, 176, 255, 128, 191, 154, 164, 168, - 128, 149, 150, 191, 128, 152, 153, 191, - 181, 128, 159, 160, 189, 190, 191, 189, - 128, 131, 132, 185, 186, 191, 144, 128, - 151, 152, 161, 162, 176, 177, 255, 169, - 177, 129, 132, 141, 142, 145, 146, 179, - 181, 186, 188, 190, 191, 192, 255, 142, - 158, 128, 155, 156, 161, 162, 175, 176, - 177, 178, 191, 169, 177, 180, 183, 128, - 132, 133, 138, 139, 142, 143, 144, 145, - 146, 147, 185, 186, 191, 157, 128, 152, - 153, 158, 159, 177, 178, 180, 181, 191, - 142, 146, 169, 177, 180, 189, 128, 132, - 133, 185, 186, 191, 144, 185, 128, 159, - 160, 161, 162, 191, 169, 177, 180, 189, - 128, 132, 133, 140, 141, 142, 143, 144, - 145, 146, 147, 185, 186, 191, 158, 177, - 128, 155, 156, 161, 162, 191, 131, 145, - 155, 157, 128, 132, 133, 138, 139, 141, - 142, 149, 150, 152, 153, 159, 160, 162, - 163, 164, 165, 167, 168, 170, 171, 173, - 174, 185, 186, 191, 144, 128, 191, 141, - 145, 169, 189, 128, 132, 133, 185, 186, - 191, 128, 151, 152, 154, 155, 159, 160, - 161, 162, 191, 128, 141, 145, 169, 180, - 189, 129, 132, 133, 185, 186, 191, 158, - 128, 159, 160, 161, 162, 176, 177, 178, - 179, 191, 141, 145, 189, 128, 132, 133, - 186, 187, 191, 142, 128, 147, 148, 150, - 151, 158, 159, 161, 162, 185, 186, 191, - 178, 188, 128, 132, 133, 150, 151, 153, - 154, 189, 190, 191, 128, 134, 135, 191, - 128, 177, 129, 179, 180, 191, 128, 131, - 137, 141, 152, 160, 164, 166, 172, 177, - 189, 129, 132, 133, 134, 135, 138, 139, - 147, 148, 167, 168, 169, 170, 179, 180, - 191, 133, 128, 134, 135, 155, 156, 159, - 160, 191, 128, 129, 191, 136, 128, 172, - 173, 191, 128, 135, 136, 140, 141, 191, - 191, 128, 170, 171, 190, 161, 128, 143, - 144, 149, 150, 153, 154, 157, 158, 164, - 165, 166, 167, 173, 174, 176, 177, 180, - 181, 255, 130, 141, 143, 159, 134, 187, - 136, 140, 142, 143, 137, 151, 153, 142, - 143, 158, 159, 137, 177, 191, 142, 143, - 182, 183, 192, 255, 129, 151, 128, 133, - 134, 135, 136, 255, 145, 150, 151, 155, - 191, 192, 255, 128, 143, 144, 159, 160, - 255, 182, 183, 190, 191, 192, 255, 128, - 129, 255, 173, 174, 192, 255, 128, 129, - 154, 155, 159, 160, 255, 171, 173, 185, - 191, 192, 255, 141, 128, 145, 146, 159, - 160, 177, 178, 191, 173, 128, 145, 146, - 159, 160, 176, 177, 191, 128, 179, 180, - 191, 151, 156, 128, 191, 128, 159, 160, - 255, 184, 191, 192, 255, 169, 128, 170, - 171, 175, 176, 255, 182, 191, 192, 255, - 128, 158, 159, 191, 128, 143, 144, 173, - 174, 175, 176, 180, 181, 191, 128, 171, - 172, 175, 176, 255, 138, 191, 192, 255, - 128, 150, 151, 159, 160, 255, 149, 191, - 192, 255, 167, 128, 191, 128, 132, 133, - 179, 180, 191, 128, 132, 133, 139, 140, - 191, 128, 130, 131, 160, 161, 173, 174, - 175, 176, 185, 186, 255, 166, 191, 192, - 255, 128, 163, 164, 191, 128, 140, 141, - 143, 144, 153, 154, 189, 190, 191, 128, - 136, 137, 191, 173, 128, 168, 169, 177, - 178, 180, 181, 182, 183, 191, 0, 127, - 192, 255, 150, 151, 158, 159, 152, 154, - 156, 158, 134, 135, 142, 143, 190, 191, - 192, 255, 181, 189, 191, 128, 190, 133, - 181, 128, 129, 130, 140, 141, 143, 144, - 147, 148, 149, 150, 155, 156, 159, 160, - 172, 173, 177, 178, 188, 189, 191, 177, - 191, 128, 190, 128, 143, 144, 156, 157, - 191, 130, 135, 148, 164, 166, 168, 128, - 137, 138, 149, 150, 151, 152, 157, 158, - 169, 170, 185, 186, 187, 188, 191, 142, - 128, 132, 133, 137, 138, 159, 160, 255, - 137, 191, 192, 255, 175, 128, 255, 159, - 165, 170, 175, 177, 180, 191, 192, 255, - 
166, 173, 128, 167, 168, 175, 176, 255, - 168, 174, 176, 191, 192, 255, 167, 175, - 183, 191, 128, 150, 151, 159, 160, 190, - 135, 143, 151, 128, 158, 159, 191, 128, - 132, 133, 135, 136, 160, 161, 169, 170, - 176, 177, 181, 182, 183, 184, 188, 189, - 191, 160, 151, 154, 187, 192, 255, 128, - 132, 133, 173, 174, 176, 177, 255, 143, - 159, 187, 191, 192, 255, 128, 175, 176, - 191, 150, 191, 192, 255, 141, 191, 192, - 255, 128, 143, 144, 189, 190, 191, 141, - 143, 160, 169, 172, 191, 192, 255, 191, - 128, 174, 175, 190, 128, 157, 158, 159, - 160, 255, 176, 191, 192, 255, 128, 150, - 151, 159, 160, 161, 162, 255, 175, 137, - 138, 184, 191, 192, 255, 128, 182, 183, - 255, 130, 134, 139, 163, 191, 192, 255, - 128, 129, 130, 179, 180, 191, 187, 189, - 128, 177, 178, 183, 184, 191, 128, 137, - 138, 165, 166, 175, 176, 255, 135, 159, - 189, 191, 192, 255, 128, 131, 132, 178, - 179, 191, 143, 165, 191, 128, 159, 160, - 175, 176, 185, 186, 190, 128, 168, 169, - 191, 131, 186, 128, 139, 140, 159, 160, - 182, 183, 189, 190, 255, 176, 178, 180, - 183, 184, 190, 191, 192, 255, 129, 128, - 130, 131, 154, 155, 157, 158, 159, 160, - 170, 171, 177, 178, 180, 181, 191, 128, - 167, 175, 129, 134, 135, 136, 137, 142, - 143, 144, 145, 150, 151, 159, 160, 255, - 155, 166, 175, 128, 162, 163, 191, 164, - 175, 135, 138, 188, 191, 192, 255, 174, - 175, 154, 191, 192, 255, 157, 169, 183, - 189, 191, 128, 134, 135, 146, 147, 151, - 152, 158, 159, 190, 130, 133, 128, 255, - 178, 191, 192, 255, 128, 146, 147, 255, - 190, 191, 192, 255, 128, 143, 144, 255, - 144, 145, 136, 175, 188, 191, 192, 255, - 181, 128, 175, 176, 255, 189, 191, 192, - 255, 128, 160, 161, 186, 187, 191, 128, - 129, 154, 155, 165, 166, 255, 191, 192, - 255, 128, 129, 130, 135, 136, 137, 138, - 143, 144, 145, 146, 151, 152, 153, 154, - 156, 157, 191, 128, 191, 128, 129, 130, - 131, 133, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 152, 156, - 157, 160, 161, 162, 163, 164, 166, 168, - 169, 170, 171, 172, 173, 174, 176, 177, - 132, 151, 153, 155, 158, 175, 178, 179, - 180, 191, 140, 167, 187, 190, 128, 255, - 142, 143, 158, 191, 192, 255, 187, 191, - 192, 255, 128, 180, 181, 191, 128, 156, - 157, 159, 160, 255, 145, 191, 192, 255, - 128, 159, 160, 175, 176, 255, 139, 143, - 182, 191, 192, 255, 144, 132, 135, 150, - 191, 192, 255, 158, 175, 148, 151, 188, - 191, 192, 255, 128, 167, 168, 175, 176, - 255, 164, 191, 192, 255, 183, 191, 192, - 255, 128, 149, 150, 159, 160, 167, 168, - 191, 136, 182, 188, 128, 133, 134, 137, - 138, 184, 185, 190, 191, 255, 150, 159, - 183, 191, 192, 255, 179, 128, 159, 160, - 181, 182, 191, 128, 149, 150, 159, 160, - 185, 186, 191, 128, 183, 184, 189, 190, - 191, 128, 148, 152, 129, 143, 144, 179, - 180, 191, 128, 159, 160, 188, 189, 191, - 128, 156, 157, 191, 136, 128, 164, 165, - 191, 128, 181, 182, 191, 128, 149, 150, - 159, 160, 178, 179, 191, 128, 145, 146, - 191, 128, 178, 179, 191, 128, 130, 131, - 132, 133, 134, 135, 136, 138, 139, 140, - 141, 144, 145, 146, 147, 150, 151, 152, - 153, 154, 156, 162, 163, 171, 176, 177, - 178, 129, 191, 128, 130, 131, 183, 184, - 191, 128, 130, 131, 175, 176, 191, 128, - 143, 144, 168, 169, 191, 128, 130, 131, - 166, 167, 191, 182, 128, 143, 144, 178, - 179, 191, 128, 130, 131, 178, 179, 191, - 128, 154, 156, 129, 132, 133, 191, 146, - 128, 171, 172, 191, 135, 137, 142, 158, - 128, 168, 169, 175, 176, 255, 159, 191, - 192, 255, 144, 128, 156, 157, 161, 162, - 191, 128, 134, 135, 138, 139, 191, 128, - 175, 176, 191, 134, 128, 131, 132, 135, - 136, 191, 128, 174, 175, 
191, 128, 151, - 152, 155, 156, 191, 132, 128, 191, 128, - 170, 171, 191, 128, 153, 154, 191, 160, - 190, 192, 255, 128, 184, 185, 191, 137, - 128, 174, 175, 191, 128, 129, 177, 178, - 255, 144, 191, 192, 255, 128, 142, 143, - 144, 145, 146, 149, 129, 148, 150, 191, - 175, 191, 192, 255, 132, 191, 192, 255, - 128, 144, 129, 143, 145, 191, 144, 153, - 128, 143, 145, 152, 154, 191, 135, 191, - 192, 255, 160, 168, 169, 171, 172, 173, - 174, 188, 189, 190, 191, 128, 159, 161, - 167, 170, 187, 185, 191, 192, 255, 128, - 143, 144, 173, 174, 191, 128, 131, 132, - 162, 163, 183, 184, 188, 189, 255, 133, - 143, 145, 191, 192, 255, 128, 146, 147, - 159, 160, 191, 160, 128, 191, 128, 129, - 191, 192, 255, 159, 160, 171, 128, 170, - 172, 191, 192, 255, 173, 191, 192, 255, - 179, 191, 192, 255, 128, 176, 177, 178, - 129, 191, 128, 129, 130, 191, 171, 175, - 189, 191, 192, 255, 128, 136, 137, 143, - 144, 153, 154, 191, 144, 145, 146, 147, - 148, 149, 154, 155, 156, 157, 158, 159, - 128, 143, 150, 153, 160, 191, 149, 157, - 173, 186, 188, 160, 161, 163, 164, 167, - 168, 132, 134, 149, 157, 186, 191, 139, - 140, 192, 255, 133, 145, 128, 134, 135, - 137, 138, 255, 166, 167, 129, 155, 187, - 149, 181, 143, 175, 137, 169, 131, 140, - 191, 192, 255, 160, 163, 164, 165, 184, - 185, 186, 128, 159, 161, 162, 166, 191, - 133, 191, 192, 255, 132, 160, 163, 167, - 179, 184, 186, 128, 164, 165, 168, 169, - 187, 188, 191, 130, 135, 137, 139, 144, - 147, 151, 153, 155, 157, 159, 163, 171, - 179, 184, 189, 191, 128, 140, 141, 148, - 149, 160, 161, 164, 165, 166, 167, 190, - 138, 164, 170, 128, 155, 156, 160, 161, - 187, 188, 191, 128, 191, 155, 156, 128, - 191, 151, 191, 192, 255, 156, 157, 160, - 128, 191, 181, 191, 192, 255, 158, 159, - 186, 128, 185, 187, 191, 192, 255, 162, - 191, 192, 255, 160, 168, 128, 159, 161, - 167, 169, 191, 158, 191, 192, 255, 123, - 128, 191, 128, 191, 128, 191, 128, 191, - 128, 191, 10, 123, 128, 191, 128, 191, - 128, 191, 123, 123, 10, 123, 128, 191, - 128, 191, 128, 191, 123, 123, 170, 181, - 183, 186, 128, 150, 152, 182, 184, 255, - 192, 255, 128, 255, 173, 130, 133, 146, - 159, 165, 171, 175, 255, 181, 190, 184, - 185, 192, 255, 140, 134, 138, 142, 161, - 163, 255, 182, 130, 136, 137, 176, 151, - 152, 154, 160, 190, 136, 144, 192, 255, - 135, 129, 130, 132, 133, 144, 170, 176, - 178, 144, 154, 160, 191, 128, 169, 174, - 255, 148, 169, 157, 158, 189, 190, 192, - 255, 144, 255, 139, 140, 178, 255, 186, - 128, 181, 160, 161, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 128, 173, 128, 155, 160, 180, - 182, 189, 148, 161, 163, 255, 176, 164, - 165, 132, 169, 177, 141, 142, 145, 146, - 179, 181, 186, 187, 158, 133, 134, 137, - 138, 143, 150, 152, 155, 164, 165, 178, - 255, 188, 129, 131, 133, 138, 143, 144, - 147, 168, 170, 176, 178, 179, 181, 182, - 184, 185, 190, 255, 157, 131, 134, 137, - 138, 142, 144, 146, 152, 159, 165, 182, - 255, 129, 131, 133, 141, 143, 145, 147, - 168, 170, 176, 178, 179, 181, 185, 188, - 255, 134, 138, 142, 143, 145, 159, 164, - 165, 176, 184, 186, 255, 129, 131, 133, - 140, 143, 144, 147, 168, 170, 176, 178, - 179, 181, 185, 188, 191, 177, 128, 132, - 135, 136, 139, 141, 150, 151, 156, 157, - 159, 163, 166, 175, 156, 130, 131, 133, - 138, 142, 144, 146, 149, 153, 154, 158, - 159, 163, 164, 168, 170, 174, 185, 190, - 191, 144, 151, 128, 130, 134, 136, 138, - 141, 166, 175, 128, 131, 133, 140, 142, - 144, 146, 168, 170, 185, 189, 255, 133, - 137, 151, 
142, 148, 155, 159, 164, 165, - 176, 255, 128, 131, 133, 140, 142, 144, - 146, 168, 170, 179, 181, 185, 188, 191, - 158, 128, 132, 134, 136, 138, 141, 149, - 150, 160, 163, 166, 175, 177, 178, 129, - 131, 133, 140, 142, 144, 146, 186, 189, - 255, 133, 137, 143, 147, 152, 158, 164, - 165, 176, 185, 192, 255, 189, 130, 131, - 133, 150, 154, 177, 179, 187, 138, 150, - 128, 134, 143, 148, 152, 159, 166, 175, - 178, 179, 129, 186, 128, 142, 144, 153, - 132, 138, 141, 165, 167, 129, 130, 135, - 136, 148, 151, 153, 159, 161, 163, 170, - 171, 173, 185, 187, 189, 134, 128, 132, - 136, 141, 144, 153, 156, 159, 128, 181, - 183, 185, 152, 153, 160, 169, 190, 191, - 128, 135, 137, 172, 177, 191, 128, 132, - 134, 151, 153, 188, 134, 128, 129, 130, - 131, 137, 138, 139, 140, 141, 142, 143, - 144, 153, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 173, 175, 176, 177, 178, - 179, 181, 182, 183, 188, 189, 190, 191, - 132, 152, 172, 184, 185, 187, 128, 191, - 128, 137, 144, 255, 158, 159, 134, 187, - 136, 140, 142, 143, 137, 151, 153, 142, - 143, 158, 159, 137, 177, 142, 143, 182, - 183, 191, 255, 128, 130, 133, 136, 150, - 152, 255, 145, 150, 151, 155, 156, 160, - 168, 178, 255, 128, 143, 160, 255, 182, - 183, 190, 255, 129, 255, 173, 174, 192, - 255, 129, 154, 160, 255, 171, 173, 185, - 255, 128, 140, 142, 148, 160, 180, 128, - 147, 160, 172, 174, 176, 178, 179, 148, - 150, 152, 155, 158, 159, 170, 255, 139, - 141, 144, 153, 160, 255, 184, 255, 128, - 170, 176, 255, 182, 255, 128, 158, 160, - 171, 176, 187, 134, 173, 176, 180, 128, - 171, 176, 255, 138, 143, 155, 255, 128, - 155, 160, 255, 159, 189, 190, 192, 255, - 167, 128, 137, 144, 153, 176, 189, 140, - 143, 154, 170, 180, 255, 180, 255, 128, - 183, 128, 137, 141, 189, 128, 136, 144, - 146, 148, 182, 184, 185, 128, 181, 187, - 191, 150, 151, 158, 159, 152, 154, 156, - 158, 134, 135, 142, 143, 190, 255, 190, - 128, 180, 182, 188, 130, 132, 134, 140, - 144, 147, 150, 155, 160, 172, 178, 180, - 182, 188, 128, 129, 130, 131, 132, 133, - 134, 176, 177, 178, 179, 180, 181, 182, - 183, 191, 255, 129, 147, 149, 176, 178, - 190, 192, 255, 144, 156, 161, 144, 156, - 165, 176, 130, 135, 149, 164, 166, 168, - 138, 147, 152, 157, 170, 185, 188, 191, - 142, 133, 137, 160, 255, 137, 255, 128, - 174, 176, 255, 159, 165, 170, 180, 255, - 167, 173, 128, 165, 176, 255, 168, 174, - 176, 190, 192, 255, 128, 150, 160, 166, - 168, 174, 176, 182, 184, 190, 128, 134, - 136, 142, 144, 150, 152, 158, 160, 191, - 128, 129, 130, 131, 132, 133, 134, 135, - 144, 145, 255, 133, 135, 161, 175, 177, - 181, 184, 188, 160, 151, 152, 187, 192, - 255, 133, 173, 177, 255, 143, 159, 187, - 255, 176, 191, 182, 183, 184, 191, 192, - 255, 150, 255, 128, 146, 147, 148, 152, - 153, 154, 155, 156, 158, 159, 160, 161, + 131, 140, 191, 192, 255, 160, 163, 164, + 165, 184, 185, 186, 128, 159, 161, 162, + 166, 191, 133, 191, 192, 255, 132, 160, + 163, 167, 179, 184, 186, 128, 164, 165, + 168, 169, 187, 188, 191, 130, 135, 137, + 139, 144, 147, 151, 153, 155, 157, 159, + 163, 171, 179, 184, 189, 191, 128, 140, + 141, 148, 149, 160, 161, 164, 165, 166, + 167, 190, 138, 164, 170, 128, 155, 156, + 160, 161, 187, 188, 191, 128, 191, 155, + 156, 128, 191, 151, 191, 192, 255, 156, + 157, 160, 128, 191, 181, 191, 192, 255, + 158, 159, 186, 128, 185, 187, 191, 192, + 255, 162, 191, 192, 255, 160, 168, 128, + 159, 161, 167, 169, 191, 158, 191, 192, + 255, 123, 123, 128, 191, 128, 191, 128, + 191, 128, 191, 128, 191, 10, 123, 123, + 128, 191, 128, 191, 128, 191, 123, 
123, + 10, 123, 128, 191, 128, 191, 128, 191, + 123, 123, 170, 181, 183, 186, 128, 150, + 152, 182, 184, 255, 192, 255, 128, 255, + 173, 130, 133, 146, 159, 165, 171, 175, + 255, 181, 190, 184, 185, 192, 255, 140, + 134, 138, 142, 161, 163, 255, 182, 130, + 136, 137, 176, 151, 152, 154, 160, 190, + 136, 144, 192, 255, 135, 129, 130, 132, + 133, 144, 170, 176, 178, 144, 154, 160, + 191, 128, 169, 174, 255, 148, 169, 157, + 158, 189, 190, 192, 255, 144, 255, 139, + 140, 178, 255, 186, 128, 181, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 172, 173, 174, 175, 176, 129, - 255, 141, 255, 144, 189, 141, 143, 172, - 255, 191, 128, 175, 180, 189, 151, 159, - 162, 255, 175, 137, 138, 184, 255, 183, - 255, 168, 255, 128, 179, 188, 134, 143, - 154, 159, 184, 186, 190, 255, 128, 173, - 176, 255, 148, 159, 189, 255, 129, 142, - 154, 159, 191, 255, 128, 182, 128, 141, - 144, 153, 160, 182, 186, 255, 128, 130, - 155, 157, 160, 175, 178, 182, 129, 134, - 137, 142, 145, 150, 160, 166, 168, 174, - 176, 255, 155, 166, 175, 128, 170, 172, - 173, 176, 185, 158, 159, 160, 255, 164, - 175, 135, 138, 188, 255, 164, 169, 171, - 172, 173, 174, 175, 180, 181, 182, 183, - 184, 185, 187, 188, 189, 190, 191, 165, - 186, 174, 175, 154, 255, 190, 128, 134, - 147, 151, 157, 168, 170, 182, 184, 188, - 128, 129, 131, 132, 134, 255, 147, 255, - 190, 255, 144, 145, 136, 175, 188, 255, - 128, 143, 160, 175, 179, 180, 141, 143, - 176, 180, 182, 255, 189, 255, 191, 144, - 153, 161, 186, 129, 154, 166, 255, 191, - 255, 130, 135, 138, 143, 146, 151, 154, - 156, 144, 145, 146, 147, 148, 150, 151, - 152, 155, 157, 158, 160, 170, 171, 172, - 175, 161, 169, 128, 129, 130, 131, 133, - 135, 138, 139, 140, 141, 142, 143, 144, - 145, 146, 147, 148, 149, 152, 156, 157, - 160, 161, 162, 163, 164, 166, 168, 169, - 170, 171, 172, 173, 174, 176, 177, 153, - 155, 178, 179, 128, 139, 141, 166, 168, - 186, 188, 189, 191, 255, 142, 143, 158, - 255, 187, 255, 128, 180, 189, 128, 156, - 160, 255, 145, 159, 161, 255, 128, 159, - 176, 255, 139, 143, 187, 255, 128, 157, - 160, 255, 144, 132, 135, 150, 255, 158, - 159, 170, 175, 148, 151, 188, 255, 128, - 167, 176, 255, 164, 255, 183, 255, 128, - 149, 160, 167, 136, 188, 128, 133, 138, - 181, 183, 184, 191, 255, 150, 159, 183, - 255, 128, 158, 160, 178, 180, 181, 128, - 149, 160, 185, 128, 183, 190, 191, 191, - 128, 131, 133, 134, 140, 147, 149, 151, - 153, 179, 184, 186, 160, 188, 128, 156, - 128, 135, 137, 166, 128, 181, 128, 149, - 160, 178, 128, 145, 128, 178, 129, 130, - 131, 132, 133, 135, 136, 138, 139, 140, - 141, 144, 145, 146, 147, 150, 151, 152, - 153, 154, 155, 156, 162, 163, 171, 176, - 177, 178, 128, 134, 135, 165, 176, 190, - 144, 168, 176, 185, 128, 180, 182, 191, - 182, 144, 179, 155, 133, 137, 141, 143, - 157, 255, 190, 128, 145, 147, 183, 136, - 128, 134, 138, 141, 143, 157, 159, 168, - 176, 255, 171, 175, 186, 255, 128, 131, - 133, 140, 143, 144, 147, 168, 170, 176, - 178, 179, 181, 185, 188, 191, 144, 151, - 128, 132, 135, 136, 139, 141, 157, 163, - 166, 172, 176, 180, 128, 138, 144, 153, - 134, 136, 143, 154, 255, 128, 181, 184, - 255, 129, 151, 158, 255, 129, 131, 133, - 143, 154, 255, 128, 137, 128, 153, 157, - 171, 176, 185, 160, 255, 170, 190, 192, - 255, 128, 184, 128, 136, 138, 182, 184, - 191, 128, 144, 153, 178, 255, 168, 144, - 145, 183, 255, 128, 142, 145, 149, 129, - 141, 144, 146, 147, 148, 175, 255, 132, - 255, 128, 144, 129, 143, 144, 153, 145, - 152, 135, 255, 160, 168, 169, 171, 172, - 173, 174, 188, 189, 190, 191, 161, 167, - 185, 255, 128, 158, 
160, 169, 144, 173, - 176, 180, 128, 131, 144, 153, 163, 183, - 189, 255, 144, 255, 133, 143, 191, 255, - 143, 159, 160, 128, 129, 255, 159, 160, - 171, 172, 255, 173, 255, 179, 255, 128, - 176, 177, 178, 128, 129, 171, 175, 189, - 255, 128, 136, 144, 153, 157, 158, 133, - 134, 137, 144, 145, 146, 147, 148, 149, - 154, 155, 156, 157, 158, 159, 168, 169, - 170, 150, 153, 165, 169, 173, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 149, 157, 173, 186, 188, 160, 161, - 163, 164, 167, 168, 132, 134, 149, 157, - 186, 139, 140, 191, 255, 134, 128, 132, - 138, 144, 146, 255, 166, 167, 129, 155, - 187, 149, 181, 143, 175, 137, 169, 131, - 140, 141, 192, 255, 128, 182, 187, 255, - 173, 180, 182, 255, 132, 155, 159, 161, - 175, 128, 160, 163, 164, 165, 184, 185, - 186, 161, 162, 128, 134, 136, 152, 155, - 161, 163, 164, 166, 170, 133, 143, 151, - 255, 139, 143, 154, 255, 164, 167, 185, - 187, 128, 131, 133, 159, 161, 162, 169, - 178, 180, 183, 130, 135, 137, 139, 148, - 151, 153, 155, 157, 159, 164, 190, 141, - 143, 145, 146, 161, 162, 167, 170, 172, - 178, 180, 183, 185, 188, 128, 137, 139, - 155, 161, 163, 165, 169, 171, 187, 155, - 156, 151, 255, 156, 157, 160, 181, 255, - 186, 187, 255, 162, 255, 160, 168, 161, - 167, 158, 255, 160, 132, 135, 133, 134, - 176, 255, 128, 191, 154, 164, 168, 128, - 149, 150, 191, 128, 152, 153, 191, 181, - 128, 159, 160, 189, 190, 191, 189, 128, - 131, 132, 185, 186, 191, 144, 128, 151, - 152, 161, 162, 176, 177, 255, 169, 177, - 129, 132, 141, 142, 145, 146, 179, 181, - 186, 188, 190, 191, 192, 255, 142, 158, - 128, 155, 156, 161, 162, 175, 176, 177, - 178, 191, 169, 177, 180, 183, 128, 132, - 133, 138, 139, 142, 143, 144, 145, 146, - 147, 185, 186, 191, 157, 128, 152, 153, - 158, 159, 177, 178, 180, 181, 191, 142, - 146, 169, 177, 180, 189, 128, 132, 133, - 185, 186, 191, 144, 185, 128, 159, 160, - 161, 162, 191, 169, 177, 180, 189, 128, - 132, 133, 140, 141, 142, 143, 144, 145, - 146, 147, 185, 186, 191, 158, 177, 128, - 155, 156, 161, 162, 191, 131, 145, 155, - 157, 128, 132, 133, 138, 139, 141, 142, - 149, 150, 152, 153, 159, 160, 162, 163, - 164, 165, 167, 168, 170, 171, 173, 174, - 185, 186, 191, 144, 128, 191, 141, 145, - 169, 189, 128, 132, 133, 185, 186, 191, - 128, 151, 152, 154, 155, 159, 160, 161, - 162, 191, 128, 141, 145, 169, 180, 189, - 129, 132, 133, 185, 186, 191, 158, 128, - 159, 160, 161, 162, 176, 177, 178, 179, - 191, 141, 145, 189, 128, 132, 133, 186, - 187, 191, 142, 128, 147, 148, 150, 151, - 158, 159, 161, 162, 185, 186, 191, 178, - 188, 128, 132, 133, 150, 151, 153, 154, - 189, 190, 191, 128, 134, 135, 191, 128, - 177, 129, 179, 180, 191, 128, 131, 137, - 141, 152, 160, 164, 166, 172, 177, 189, - 129, 132, 133, 134, 135, 138, 139, 147, - 148, 167, 168, 169, 170, 179, 180, 191, - 133, 128, 134, 135, 155, 156, 159, 160, - 191, 128, 129, 191, 136, 128, 172, 173, - 191, 128, 135, 136, 140, 141, 191, 191, - 128, 170, 171, 190, 161, 128, 143, 144, - 149, 150, 153, 154, 157, 158, 164, 165, - 166, 167, 173, 174, 176, 177, 180, 181, - 255, 130, 141, 143, 159, 134, 187, 136, - 140, 142, 143, 137, 151, 153, 142, 143, - 158, 159, 137, 177, 191, 142, 143, 182, - 183, 192, 255, 129, 151, 128, 133, 134, - 135, 136, 255, 145, 150, 151, 155, 191, - 192, 255, 128, 143, 144, 159, 160, 255, - 182, 183, 190, 191, 192, 255, 128, 129, - 255, 173, 174, 192, 255, 128, 129, 154, - 155, 159, 160, 255, 171, 173, 185, 191, - 192, 255, 141, 128, 145, 146, 159, 160, - 177, 178, 191, 173, 128, 145, 146, 159, - 160, 176, 177, 191, 128, 179, 180, 191, - 151, 
156, 128, 191, 128, 159, 160, 255, - 184, 191, 192, 255, 169, 128, 170, 171, - 175, 176, 255, 182, 191, 192, 255, 128, - 158, 159, 191, 128, 143, 144, 173, 174, - 175, 176, 180, 181, 191, 128, 171, 172, - 175, 176, 255, 138, 191, 192, 255, 128, - 150, 151, 159, 160, 255, 149, 191, 192, - 255, 167, 128, 191, 128, 132, 133, 179, - 180, 191, 128, 132, 133, 139, 140, 191, - 128, 130, 131, 160, 161, 173, 174, 175, - 176, 185, 186, 255, 166, 191, 192, 255, - 128, 163, 164, 191, 128, 140, 141, 143, - 144, 153, 154, 189, 190, 191, 128, 136, - 137, 191, 173, 128, 168, 169, 177, 178, - 180, 181, 182, 183, 191, 0, 127, 192, - 255, 150, 151, 158, 159, 152, 154, 156, - 158, 134, 135, 142, 143, 190, 191, 192, - 255, 181, 189, 191, 128, 190, 133, 181, - 128, 129, 130, 140, 141, 143, 144, 147, - 148, 149, 150, 155, 156, 159, 160, 172, - 173, 177, 178, 188, 189, 191, 177, 191, - 128, 190, 128, 143, 144, 156, 157, 191, - 130, 135, 148, 164, 166, 168, 128, 137, - 138, 149, 150, 151, 152, 157, 158, 169, - 170, 185, 186, 187, 188, 191, 142, 128, - 132, 133, 137, 138, 159, 160, 255, 137, - 191, 192, 255, 175, 128, 255, 159, 165, - 170, 175, 177, 180, 191, 192, 255, 166, - 173, 128, 167, 168, 175, 176, 255, 168, - 174, 176, 191, 192, 255, 167, 175, 183, - 191, 128, 150, 151, 159, 160, 190, 135, - 143, 151, 128, 158, 159, 191, 128, 132, - 133, 135, 136, 160, 161, 169, 170, 176, - 177, 181, 182, 183, 184, 188, 189, 191, - 160, 151, 154, 187, 192, 255, 128, 132, - 133, 173, 174, 176, 177, 255, 143, 159, - 187, 191, 192, 255, 128, 175, 176, 191, - 150, 191, 192, 255, 141, 191, 192, 255, - 128, 143, 144, 189, 190, 191, 141, 143, - 160, 169, 172, 191, 192, 255, 191, 128, - 174, 175, 190, 128, 157, 158, 159, 160, - 255, 176, 191, 192, 255, 128, 150, 151, - 159, 160, 161, 162, 255, 175, 137, 138, - 184, 191, 192, 255, 128, 182, 183, 255, - 130, 134, 139, 163, 191, 192, 255, 128, - 129, 130, 179, 180, 191, 187, 189, 128, - 177, 178, 183, 184, 191, 128, 137, 138, - 165, 166, 175, 176, 255, 135, 159, 189, - 191, 192, 255, 128, 131, 132, 178, 179, - 191, 143, 165, 191, 128, 159, 160, 175, - 176, 185, 186, 190, 128, 168, 169, 191, - 131, 186, 128, 139, 140, 159, 160, 182, - 183, 189, 190, 255, 176, 178, 180, 183, - 184, 190, 191, 192, 255, 129, 128, 130, - 131, 154, 155, 157, 158, 159, 160, 170, - 171, 177, 178, 180, 181, 191, 128, 167, - 175, 129, 134, 135, 136, 137, 142, 143, - 144, 145, 150, 151, 159, 160, 255, 155, - 166, 175, 128, 162, 163, 191, 164, 175, - 135, 138, 188, 191, 192, 255, 174, 175, - 154, 191, 192, 255, 157, 169, 183, 189, - 191, 128, 134, 135, 146, 147, 151, 152, - 158, 159, 190, 130, 133, 128, 255, 178, - 191, 192, 255, 128, 146, 147, 255, 190, - 191, 192, 255, 128, 143, 144, 255, 144, - 145, 136, 175, 188, 191, 192, 255, 181, - 128, 175, 176, 255, 189, 191, 192, 255, - 128, 160, 161, 186, 187, 191, 128, 129, - 154, 155, 165, 166, 255, 191, 192, 255, - 128, 129, 130, 135, 136, 137, 138, 143, - 144, 145, 146, 151, 152, 153, 154, 156, - 157, 191, 128, 191, 128, 129, 130, 131, - 133, 138, 139, 140, 141, 142, 143, 144, - 145, 146, 147, 148, 149, 152, 156, 157, - 160, 161, 162, 163, 164, 166, 168, 169, - 170, 171, 172, 173, 174, 176, 177, 132, - 151, 153, 155, 158, 175, 178, 179, 180, - 191, 140, 167, 187, 190, 128, 255, 142, - 143, 158, 191, 192, 255, 187, 191, 192, - 255, 128, 180, 181, 191, 128, 156, 157, - 159, 160, 255, 145, 191, 192, 255, 128, - 159, 160, 175, 176, 255, 139, 143, 182, - 191, 192, 255, 144, 132, 135, 150, 191, - 192, 255, 158, 175, 148, 151, 188, 191, - 192, 255, 128, 167, 168, 175, 
176, 255, - 164, 191, 192, 255, 183, 191, 192, 255, - 128, 149, 150, 159, 160, 167, 168, 191, - 136, 182, 188, 128, 133, 134, 137, 138, - 184, 185, 190, 191, 255, 150, 159, 183, - 191, 192, 255, 179, 128, 159, 160, 181, - 182, 191, 128, 149, 150, 159, 160, 185, - 186, 191, 128, 183, 184, 189, 190, 191, - 128, 148, 152, 129, 143, 144, 179, 180, - 191, 128, 159, 160, 188, 189, 191, 128, - 156, 157, 191, 136, 128, 164, 165, 191, - 128, 181, 182, 191, 128, 149, 150, 159, - 160, 178, 179, 191, 128, 145, 146, 191, - 128, 178, 179, 191, 128, 130, 131, 132, - 133, 134, 135, 136, 138, 139, 140, 141, - 144, 145, 146, 147, 150, 151, 152, 153, - 154, 156, 162, 163, 171, 176, 177, 178, - 129, 191, 128, 130, 131, 183, 184, 191, - 128, 130, 131, 175, 176, 191, 128, 143, - 144, 168, 169, 191, 128, 130, 131, 166, - 167, 191, 182, 128, 143, 144, 178, 179, - 191, 128, 130, 131, 178, 179, 191, 128, - 154, 156, 129, 132, 133, 191, 146, 128, - 171, 172, 191, 135, 137, 142, 158, 128, - 168, 169, 175, 176, 255, 159, 191, 192, - 255, 144, 128, 156, 157, 161, 162, 191, - 128, 134, 135, 138, 139, 191, 128, 175, - 176, 191, 134, 128, 131, 132, 135, 136, - 191, 128, 174, 175, 191, 128, 151, 152, - 155, 156, 191, 132, 128, 191, 128, 170, - 171, 191, 128, 153, 154, 191, 160, 190, - 192, 255, 128, 184, 185, 191, 137, 128, - 174, 175, 191, 128, 129, 177, 178, 255, - 144, 191, 192, 255, 128, 142, 143, 144, - 145, 146, 149, 129, 148, 150, 191, 175, - 191, 192, 255, 132, 191, 192, 255, 128, - 144, 129, 143, 145, 191, 144, 153, 128, - 143, 145, 152, 154, 191, 135, 191, 192, - 255, 160, 168, 169, 171, 172, 173, 174, - 188, 189, 190, 191, 128, 159, 161, 167, - 170, 187, 185, 191, 192, 255, 128, 143, - 144, 173, 174, 191, 128, 131, 132, 162, - 163, 183, 184, 188, 189, 255, 133, 143, - 145, 191, 192, 255, 128, 146, 147, 159, - 160, 191, 160, 128, 191, 128, 129, 191, - 192, 255, 159, 160, 171, 128, 170, 172, - 191, 192, 255, 173, 191, 192, 255, 179, - 191, 192, 255, 128, 176, 177, 178, 129, - 191, 128, 129, 130, 191, 171, 175, 189, - 191, 192, 255, 128, 136, 137, 143, 144, - 153, 154, 191, 144, 145, 146, 147, 148, - 149, 154, 155, 156, 157, 158, 159, 128, - 143, 150, 153, 160, 191, 149, 157, 173, - 186, 188, 160, 161, 163, 164, 167, 168, - 132, 134, 149, 157, 186, 191, 139, 140, - 192, 255, 133, 145, 128, 134, 135, 137, - 138, 255, 166, 167, 129, 155, 187, 149, - 181, 143, 175, 137, 169, 131, 140, 191, - 192, 255, 160, 163, 164, 165, 184, 185, - 186, 128, 159, 161, 162, 166, 191, 133, - 191, 192, 255, 132, 160, 163, 167, 179, - 184, 186, 128, 164, 165, 168, 169, 187, - 188, 191, 130, 135, 137, 139, 144, 147, - 151, 153, 155, 157, 159, 163, 171, 179, - 184, 189, 191, 128, 140, 141, 148, 149, - 160, 161, 164, 165, 166, 167, 190, 138, - 164, 170, 128, 155, 156, 160, 161, 187, - 188, 191, 128, 191, 155, 156, 128, 191, - 151, 191, 192, 255, 156, 157, 160, 128, - 191, 181, 191, 192, 255, 158, 159, 186, - 128, 185, 187, 191, 192, 255, 162, 191, - 192, 255, 160, 168, 128, 159, 161, 167, - 169, 191, 158, 191, 192, 255, 9, 10, - 13, 32, 33, 34, 35, 38, 46, 47, - 60, 61, 62, 64, 92, 95, 123, 124, - 125, 126, 127, 194, 195, 198, 199, 203, - 204, 205, 206, 207, 210, 212, 213, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 228, 233, 234, 237, - 238, 239, 240, 0, 39, 40, 45, 48, - 57, 58, 63, 65, 90, 91, 96, 97, - 122, 192, 193, 196, 218, 229, 236, 241, - 247, 9, 32, 10, 61, 10, 38, 46, - 42, 47, 42, 46, 69, 101, 48, 57, - 60, 61, 61, 62, 61, 45, 95, 194, - 195, 198, 199, 203, 204, 205, 206, 207, - 210, 212, 213, 214, 215, 
216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 228, 233, 234, 237, 239, 240, 243, 48, - 57, 65, 90, 97, 122, 196, 218, 229, - 236, 124, 125, 128, 191, 170, 181, 186, - 128, 191, 151, 183, 128, 255, 192, 255, - 0, 127, 173, 130, 133, 146, 159, 165, - 171, 175, 191, 192, 255, 181, 190, 128, - 175, 176, 183, 184, 185, 186, 191, 134, - 139, 141, 162, 128, 135, 136, 255, 182, - 130, 137, 176, 151, 152, 154, 160, 136, - 191, 192, 255, 128, 143, 144, 170, 171, - 175, 176, 178, 179, 191, 128, 159, 160, - 191, 176, 128, 138, 139, 173, 174, 255, - 148, 150, 164, 167, 173, 176, 185, 189, - 190, 192, 255, 144, 128, 145, 146, 175, - 176, 191, 128, 140, 141, 255, 166, 176, - 178, 191, 192, 255, 186, 128, 137, 138, - 170, 171, 179, 180, 181, 182, 191, 160, - 161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, - 186, 187, 188, 189, 190, 128, 191, 128, - 129, 130, 131, 137, 138, 139, 140, 141, - 142, 143, 144, 153, 154, 155, 156, 157, + 186, 187, 188, 189, 190, 191, 128, 173, + 128, 155, 160, 180, 182, 189, 148, 161, + 163, 255, 176, 164, 165, 132, 169, 177, + 141, 142, 145, 146, 179, 181, 186, 187, + 158, 133, 134, 137, 138, 143, 150, 152, + 155, 164, 165, 178, 255, 188, 129, 131, + 133, 138, 143, 144, 147, 168, 170, 176, + 178, 179, 181, 182, 184, 185, 190, 255, + 157, 131, 134, 137, 138, 142, 144, 146, + 152, 159, 165, 182, 255, 129, 131, 133, + 141, 143, 145, 147, 168, 170, 176, 178, + 179, 181, 185, 188, 255, 134, 138, 142, + 143, 145, 159, 164, 165, 176, 184, 186, + 255, 129, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 177, 128, 132, 135, 136, 139, 141, + 150, 151, 156, 157, 159, 163, 166, 175, + 156, 130, 131, 133, 138, 142, 144, 146, + 149, 153, 154, 158, 159, 163, 164, 168, + 170, 174, 185, 190, 191, 144, 151, 128, + 130, 134, 136, 138, 141, 166, 175, 128, + 131, 133, 140, 142, 144, 146, 168, 170, + 185, 189, 255, 133, 137, 151, 142, 148, + 155, 159, 164, 165, 176, 255, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 179, + 181, 185, 188, 191, 158, 128, 132, 134, + 136, 138, 141, 149, 150, 160, 163, 166, + 175, 177, 178, 129, 131, 133, 140, 142, + 144, 146, 186, 189, 255, 133, 137, 143, + 147, 152, 158, 164, 165, 176, 185, 192, + 255, 189, 130, 131, 133, 150, 154, 177, + 179, 187, 138, 150, 128, 134, 143, 148, + 152, 159, 166, 175, 178, 179, 129, 186, + 128, 142, 144, 153, 132, 138, 141, 165, + 167, 129, 130, 135, 136, 148, 151, 153, + 159, 161, 163, 170, 171, 173, 185, 187, + 189, 134, 128, 132, 136, 141, 144, 153, + 156, 159, 128, 181, 183, 185, 152, 153, + 160, 169, 190, 191, 128, 135, 137, 172, + 177, 191, 128, 132, 134, 151, 153, 188, + 134, 128, 129, 130, 131, 137, 138, 139, + 140, 141, 142, 143, 144, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 173, + 175, 176, 177, 178, 179, 181, 182, 183, + 188, 189, 190, 191, 132, 152, 172, 184, + 185, 187, 128, 191, 128, 137, 144, 255, + 158, 159, 134, 187, 136, 140, 142, 143, + 137, 151, 153, 142, 143, 158, 159, 137, + 177, 142, 143, 182, 183, 191, 255, 128, + 130, 133, 136, 150, 152, 255, 145, 150, + 151, 155, 156, 160, 168, 178, 255, 128, + 143, 160, 255, 182, 183, 190, 255, 129, + 255, 173, 174, 192, 255, 129, 154, 160, + 255, 171, 173, 185, 255, 128, 140, 142, + 148, 160, 180, 128, 147, 160, 172, 174, + 176, 178, 179, 148, 150, 152, 155, 158, + 159, 170, 255, 139, 141, 144, 153, 160, + 255, 184, 255, 128, 170, 176, 255, 182, + 255, 128, 158, 160, 171, 176, 187, 134, + 173, 176, 180, 128, 
171, 176, 255, 138, + 143, 155, 255, 128, 155, 160, 255, 159, + 189, 190, 192, 255, 167, 128, 137, 144, + 153, 176, 189, 140, 143, 154, 170, 180, + 255, 180, 255, 128, 183, 128, 137, 141, + 189, 128, 136, 144, 146, 148, 182, 184, + 185, 128, 181, 187, 191, 150, 151, 158, + 159, 152, 154, 156, 158, 134, 135, 142, + 143, 190, 255, 190, 128, 180, 182, 188, + 130, 132, 134, 140, 144, 147, 150, 155, + 160, 172, 178, 180, 182, 188, 128, 129, + 130, 131, 132, 133, 134, 176, 177, 178, + 179, 180, 181, 182, 183, 191, 255, 129, + 147, 149, 176, 178, 190, 192, 255, 144, + 156, 161, 144, 156, 165, 176, 130, 135, + 149, 164, 166, 168, 138, 147, 152, 157, + 170, 185, 188, 191, 142, 133, 137, 160, + 255, 137, 255, 128, 174, 176, 255, 159, + 165, 170, 180, 255, 167, 173, 128, 165, + 176, 255, 168, 174, 176, 190, 192, 255, + 128, 150, 160, 166, 168, 174, 176, 182, + 184, 190, 128, 134, 136, 142, 144, 150, + 152, 158, 160, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 145, 255, 133, + 135, 161, 175, 177, 181, 184, 188, 160, + 151, 152, 187, 192, 255, 133, 173, 177, + 255, 143, 159, 187, 255, 176, 191, 182, + 183, 184, 191, 192, 255, 150, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 182, - 183, 184, 188, 189, 190, 191, 132, 187, - 129, 130, 132, 133, 134, 176, 177, 178, - 179, 180, 181, 182, 183, 128, 191, 128, - 129, 130, 131, 132, 133, 134, 135, 144, - 136, 143, 145, 191, 192, 255, 182, 183, - 184, 128, 191, 128, 191, 191, 128, 190, - 192, 255, 128, 146, 147, 148, 152, 153, - 154, 155, 156, 158, 159, 160, 161, 162, + 174, 175, 176, 129, 255, 141, 255, 144, + 189, 141, 143, 172, 255, 191, 128, 175, + 180, 189, 151, 159, 162, 255, 175, 137, + 138, 184, 255, 183, 255, 168, 255, 128, + 179, 188, 134, 143, 154, 159, 184, 186, + 190, 255, 128, 173, 176, 255, 148, 159, + 189, 255, 129, 142, 154, 159, 191, 255, + 128, 182, 128, 141, 144, 153, 160, 182, + 186, 255, 128, 130, 155, 157, 160, 175, + 178, 182, 129, 134, 137, 142, 145, 150, + 160, 166, 168, 174, 176, 255, 155, 166, + 175, 128, 170, 172, 173, 176, 185, 158, + 159, 160, 255, 164, 175, 135, 138, 188, + 255, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 165, 186, 174, 175, 154, + 255, 190, 128, 134, 147, 151, 157, 168, + 170, 182, 184, 188, 128, 129, 131, 132, + 134, 255, 147, 255, 190, 255, 144, 145, + 136, 175, 188, 255, 128, 143, 160, 175, + 179, 180, 141, 143, 176, 180, 182, 255, + 189, 255, 191, 144, 153, 161, 186, 129, + 154, 166, 255, 191, 255, 130, 135, 138, + 143, 146, 151, 154, 156, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 161, 169, 128, + 129, 130, 131, 133, 135, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 152, 156, 157, 160, 161, 162, 163, + 164, 166, 168, 169, 170, 171, 172, 173, + 174, 176, 177, 153, 155, 178, 179, 128, + 139, 141, 166, 168, 186, 188, 189, 191, + 255, 142, 143, 158, 255, 187, 255, 128, + 180, 189, 128, 156, 160, 255, 145, 159, + 161, 255, 128, 159, 176, 255, 139, 143, + 187, 255, 128, 157, 160, 255, 144, 132, + 135, 150, 255, 158, 159, 170, 175, 148, + 151, 188, 255, 128, 167, 176, 255, 164, + 255, 183, 255, 128, 149, 160, 167, 136, + 188, 128, 133, 138, 181, 183, 184, 191, + 255, 150, 159, 183, 255, 128, 158, 160, + 178, 180, 181, 128, 149, 160, 185, 128, + 183, 190, 191, 191, 128, 131, 133, 134, + 140, 147, 149, 151, 153, 179, 184, 186, + 160, 188, 128, 156, 128, 135, 137, 166, + 128, 
181, 128, 149, 160, 178, 128, 145, + 128, 178, 129, 130, 131, 132, 133, 135, + 136, 138, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 155, 156, + 162, 163, 171, 176, 177, 178, 128, 134, + 135, 165, 176, 190, 144, 168, 176, 185, + 128, 180, 182, 191, 182, 144, 179, 155, + 133, 137, 141, 143, 157, 255, 190, 128, + 145, 147, 183, 136, 128, 134, 138, 141, + 143, 157, 159, 168, 176, 255, 171, 175, + 186, 255, 128, 131, 133, 140, 143, 144, + 147, 168, 170, 176, 178, 179, 181, 185, + 188, 191, 144, 151, 128, 132, 135, 136, + 139, 141, 157, 163, 166, 172, 176, 180, + 128, 138, 144, 153, 134, 136, 143, 154, + 255, 128, 181, 184, 255, 129, 151, 158, + 255, 129, 131, 133, 143, 154, 255, 128, + 137, 128, 153, 157, 171, 176, 185, 160, + 255, 170, 190, 192, 255, 128, 184, 128, + 136, 138, 182, 184, 191, 128, 144, 153, + 178, 255, 168, 144, 145, 183, 255, 128, + 142, 145, 149, 129, 141, 144, 146, 147, + 148, 175, 255, 132, 255, 128, 144, 129, + 143, 144, 153, 145, 152, 135, 255, 160, + 168, 169, 171, 172, 173, 174, 188, 189, + 190, 191, 161, 167, 185, 255, 128, 158, + 160, 169, 144, 173, 176, 180, 128, 131, + 144, 153, 163, 183, 189, 255, 144, 255, + 133, 143, 191, 255, 143, 159, 160, 128, + 129, 255, 159, 160, 171, 172, 255, 173, + 255, 179, 255, 128, 176, 177, 178, 128, + 129, 171, 175, 189, 255, 128, 136, 144, + 153, 157, 158, 133, 134, 137, 144, 145, + 146, 147, 148, 149, 154, 155, 156, 157, + 158, 159, 168, 169, 170, 150, 153, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 149, 157, 173, + 186, 188, 160, 161, 163, 164, 167, 168, + 132, 134, 149, 157, 186, 139, 140, 191, + 255, 134, 128, 132, 138, 144, 146, 255, + 166, 167, 129, 155, 187, 149, 181, 143, + 175, 137, 169, 131, 140, 141, 192, 255, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 160, 163, + 164, 165, 184, 185, 186, 161, 162, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 133, 143, 151, 255, 139, 143, 154, + 255, 164, 167, 185, 187, 128, 131, 133, + 159, 161, 162, 169, 178, 180, 183, 130, + 135, 137, 139, 148, 151, 153, 155, 157, + 159, 164, 190, 141, 143, 145, 146, 161, + 162, 167, 170, 172, 178, 180, 183, 185, + 188, 128, 137, 139, 155, 161, 163, 165, + 169, 171, 187, 155, 156, 151, 255, 156, + 157, 160, 181, 255, 186, 187, 255, 162, + 255, 160, 168, 161, 167, 158, 255, 160, + 132, 135, 133, 134, 176, 255, 128, 191, + 154, 164, 168, 128, 149, 150, 191, 128, + 152, 153, 191, 181, 128, 159, 160, 189, + 190, 191, 189, 128, 131, 132, 185, 186, + 191, 144, 128, 151, 152, 161, 162, 176, + 177, 255, 169, 177, 129, 132, 141, 142, + 145, 146, 179, 181, 186, 188, 190, 191, + 192, 255, 142, 158, 128, 155, 156, 161, + 162, 175, 176, 177, 178, 191, 169, 177, + 180, 183, 128, 132, 133, 138, 139, 142, + 143, 144, 145, 146, 147, 185, 186, 191, + 157, 128, 152, 153, 158, 159, 177, 178, + 180, 181, 191, 142, 146, 169, 177, 180, + 189, 128, 132, 133, 185, 186, 191, 144, + 185, 128, 159, 160, 161, 162, 191, 169, + 177, 180, 189, 128, 132, 133, 140, 141, + 142, 143, 144, 145, 146, 147, 185, 186, + 191, 158, 177, 128, 155, 156, 161, 162, + 191, 131, 145, 155, 157, 128, 132, 133, + 138, 139, 141, 142, 149, 150, 152, 153, + 159, 160, 162, 163, 164, 165, 167, 168, + 170, 171, 173, 174, 185, 186, 191, 144, + 128, 191, 141, 145, 169, 189, 128, 132, + 133, 185, 186, 191, 128, 151, 152, 154, + 155, 159, 160, 161, 162, 191, 128, 141, + 145, 169, 180, 189, 129, 132, 133, 185, + 186, 191, 158, 128, 159, 160, 161, 162, + 176, 177, 178, 179, 191, 141, 145, 189, + 128, 132, 133, 186, 187, 191, 
142, 128, + 147, 148, 150, 151, 158, 159, 161, 162, + 185, 186, 191, 178, 188, 128, 132, 133, + 150, 151, 153, 154, 189, 190, 191, 128, + 134, 135, 191, 128, 177, 129, 179, 180, + 191, 128, 131, 137, 141, 152, 160, 164, + 166, 172, 177, 189, 129, 132, 133, 134, + 135, 138, 139, 147, 148, 167, 168, 169, + 170, 179, 180, 191, 133, 128, 134, 135, + 155, 156, 159, 160, 191, 128, 129, 191, + 136, 128, 172, 173, 191, 128, 135, 136, + 140, 141, 191, 191, 128, 170, 171, 190, + 161, 128, 143, 144, 149, 150, 153, 154, + 157, 158, 164, 165, 166, 167, 173, 174, + 176, 177, 180, 181, 255, 130, 141, 143, + 159, 134, 187, 136, 140, 142, 143, 137, + 151, 153, 142, 143, 158, 159, 137, 177, + 191, 142, 143, 182, 183, 192, 255, 129, + 151, 128, 133, 134, 135, 136, 255, 145, + 150, 151, 155, 191, 192, 255, 128, 143, + 144, 159, 160, 255, 182, 183, 190, 191, + 192, 255, 128, 129, 255, 173, 174, 192, + 255, 128, 129, 154, 155, 159, 160, 255, + 171, 173, 185, 191, 192, 255, 141, 128, + 145, 146, 159, 160, 177, 178, 191, 173, + 128, 145, 146, 159, 160, 176, 177, 191, + 128, 179, 180, 191, 151, 156, 128, 191, + 128, 159, 160, 255, 184, 191, 192, 255, + 169, 128, 170, 171, 175, 176, 255, 182, + 191, 192, 255, 128, 158, 159, 191, 128, + 143, 144, 173, 174, 175, 176, 180, 181, + 191, 128, 171, 172, 175, 176, 255, 138, + 191, 192, 255, 128, 150, 151, 159, 160, + 255, 149, 191, 192, 255, 167, 128, 191, + 128, 132, 133, 179, 180, 191, 128, 132, + 133, 139, 140, 191, 128, 130, 131, 160, + 161, 173, 174, 175, 176, 185, 186, 255, + 166, 191, 192, 255, 128, 163, 164, 191, + 128, 140, 141, 143, 144, 153, 154, 189, + 190, 191, 128, 136, 137, 191, 173, 128, + 168, 169, 177, 178, 180, 181, 182, 183, + 191, 128, 255, 192, 255, 150, 151, 158, + 159, 152, 154, 156, 158, 134, 135, 142, + 143, 190, 191, 192, 255, 181, 189, 191, + 128, 190, 133, 181, 128, 129, 130, 140, + 141, 143, 144, 147, 148, 149, 150, 155, + 156, 159, 160, 172, 173, 177, 178, 188, + 189, 191, 177, 191, 128, 190, 128, 143, + 144, 156, 157, 191, 130, 135, 148, 164, + 166, 168, 128, 137, 138, 149, 150, 151, + 152, 157, 158, 169, 170, 185, 186, 187, + 188, 191, 142, 128, 132, 133, 137, 138, + 159, 160, 255, 137, 191, 192, 255, 175, + 128, 255, 159, 165, 170, 175, 177, 180, + 191, 192, 255, 166, 173, 128, 167, 168, + 175, 176, 255, 168, 174, 176, 191, 192, + 255, 167, 175, 183, 191, 128, 150, 151, + 159, 160, 190, 135, 143, 151, 128, 158, + 159, 191, 128, 132, 133, 135, 136, 160, + 161, 169, 170, 176, 177, 181, 182, 183, + 184, 188, 189, 191, 160, 151, 154, 187, + 192, 255, 128, 132, 133, 173, 174, 176, + 177, 255, 143, 159, 187, 191, 192, 255, + 128, 175, 176, 191, 150, 191, 192, 255, + 141, 191, 192, 255, 128, 143, 144, 189, + 190, 191, 141, 143, 160, 169, 172, 191, + 192, 255, 191, 128, 174, 175, 190, 128, + 157, 158, 159, 160, 255, 176, 191, 192, + 255, 128, 150, 151, 159, 160, 161, 162, + 255, 175, 137, 138, 184, 191, 192, 255, + 128, 182, 183, 255, 130, 134, 139, 163, + 191, 192, 255, 128, 129, 130, 179, 180, + 191, 187, 189, 128, 177, 178, 183, 184, + 191, 128, 137, 138, 165, 166, 175, 176, + 255, 135, 159, 189, 191, 192, 255, 128, + 131, 132, 178, 179, 191, 143, 165, 191, + 128, 159, 160, 175, 176, 185, 186, 190, + 128, 168, 169, 191, 131, 186, 128, 139, + 140, 159, 160, 182, 183, 189, 190, 255, + 176, 178, 180, 183, 184, 190, 191, 192, + 255, 129, 128, 130, 131, 154, 155, 157, + 158, 159, 160, 170, 171, 177, 178, 180, + 181, 191, 128, 167, 175, 129, 134, 135, + 136, 137, 142, 143, 144, 145, 150, 151, + 159, 160, 255, 155, 166, 175, 128, 162, + 163, 191, 164, 
175, 135, 138, 188, 191, + 192, 255, 174, 175, 154, 191, 192, 255, + 157, 169, 183, 189, 191, 128, 134, 135, + 146, 147, 151, 152, 158, 159, 190, 130, + 133, 128, 255, 178, 191, 192, 255, 128, + 146, 147, 255, 190, 191, 192, 255, 128, + 143, 144, 255, 144, 145, 136, 175, 188, + 191, 192, 255, 181, 128, 175, 176, 255, + 189, 191, 192, 255, 128, 160, 161, 186, + 187, 191, 128, 129, 154, 155, 165, 166, + 255, 191, 192, 255, 128, 129, 130, 135, + 136, 137, 138, 143, 144, 145, 146, 151, + 152, 153, 154, 156, 157, 191, 128, 191, + 128, 129, 130, 131, 133, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 152, 156, 157, 160, 161, 162, 163, + 164, 166, 168, 169, 170, 171, 172, 173, + 174, 176, 177, 132, 151, 153, 155, 158, + 175, 178, 179, 180, 191, 140, 167, 187, + 190, 128, 255, 142, 143, 158, 191, 192, + 255, 187, 191, 192, 255, 128, 180, 181, + 191, 128, 156, 157, 159, 160, 255, 145, + 191, 192, 255, 128, 159, 160, 175, 176, + 255, 139, 143, 182, 191, 192, 255, 144, + 132, 135, 150, 191, 192, 255, 158, 175, + 148, 151, 188, 191, 192, 255, 128, 167, + 168, 175, 176, 255, 164, 191, 192, 255, + 183, 191, 192, 255, 128, 149, 150, 159, + 160, 167, 168, 191, 136, 182, 188, 128, + 133, 134, 137, 138, 184, 185, 190, 191, + 255, 150, 159, 183, 191, 192, 255, 179, + 128, 159, 160, 181, 182, 191, 128, 149, + 150, 159, 160, 185, 186, 191, 128, 183, + 184, 189, 190, 191, 128, 148, 152, 129, + 143, 144, 179, 180, 191, 128, 159, 160, + 188, 189, 191, 128, 156, 157, 191, 136, + 128, 164, 165, 191, 128, 181, 182, 191, + 128, 149, 150, 159, 160, 178, 179, 191, + 128, 145, 146, 191, 128, 178, 179, 191, + 128, 130, 131, 132, 133, 134, 135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 162, 163, + 171, 176, 177, 178, 129, 191, 128, 130, + 131, 183, 184, 191, 128, 130, 131, 175, + 176, 191, 128, 143, 144, 168, 169, 191, + 128, 130, 131, 166, 167, 191, 182, 128, + 143, 144, 178, 179, 191, 128, 130, 131, + 178, 179, 191, 128, 154, 156, 129, 132, + 133, 191, 146, 128, 171, 172, 191, 135, + 137, 142, 158, 128, 168, 169, 175, 176, + 255, 159, 191, 192, 255, 144, 128, 156, + 157, 161, 162, 191, 128, 134, 135, 138, + 139, 191, 128, 175, 176, 191, 134, 128, + 131, 132, 135, 136, 191, 128, 174, 175, + 191, 128, 151, 152, 155, 156, 191, 132, + 128, 191, 128, 170, 171, 191, 128, 153, + 154, 191, 160, 190, 192, 255, 128, 184, + 185, 191, 137, 128, 174, 175, 191, 128, + 129, 177, 178, 255, 144, 191, 192, 255, + 128, 142, 143, 144, 145, 146, 149, 129, + 148, 150, 191, 175, 191, 192, 255, 132, + 191, 192, 255, 128, 144, 129, 143, 145, + 191, 144, 153, 128, 143, 145, 152, 154, + 191, 135, 191, 192, 255, 160, 168, 169, + 171, 172, 173, 174, 188, 189, 190, 191, + 128, 159, 161, 167, 170, 187, 185, 191, + 192, 255, 128, 143, 144, 173, 174, 191, + 128, 131, 132, 162, 163, 183, 184, 188, + 189, 255, 133, 143, 145, 191, 192, 255, + 128, 146, 147, 159, 160, 191, 160, 128, + 191, 128, 129, 191, 192, 255, 159, 160, + 171, 128, 170, 172, 191, 192, 255, 173, + 191, 192, 255, 179, 191, 192, 255, 128, + 176, 177, 178, 129, 191, 128, 129, 130, + 191, 171, 175, 189, 191, 192, 255, 128, + 136, 137, 143, 144, 153, 154, 191, 144, + 145, 146, 147, 148, 149, 154, 155, 156, + 157, 158, 159, 128, 143, 150, 153, 160, + 191, 149, 157, 173, 186, 188, 160, 161, + 163, 164, 167, 168, 132, 134, 149, 157, + 186, 191, 139, 140, 192, 255, 133, 145, + 128, 134, 135, 137, 138, 255, 166, 167, + 129, 155, 187, 149, 181, 143, 175, 137, + 169, 131, 140, 191, 192, 255, 160, 163, + 164, 165, 184, 185, 186, 128, 159, 161, + 
162, 166, 191, 133, 191, 192, 255, 132, + 160, 163, 167, 179, 184, 186, 128, 164, + 165, 168, 169, 187, 188, 191, 130, 135, + 137, 139, 144, 147, 151, 153, 155, 157, + 159, 163, 171, 179, 184, 189, 191, 128, + 140, 141, 148, 149, 160, 161, 164, 165, + 166, 167, 190, 138, 164, 170, 128, 155, + 156, 160, 161, 187, 188, 191, 128, 191, + 155, 156, 128, 191, 151, 191, 192, 255, + 156, 157, 160, 128, 191, 181, 191, 192, + 255, 158, 159, 186, 128, 185, 187, 191, + 192, 255, 162, 191, 192, 255, 160, 168, + 128, 159, 161, 167, 169, 191, 158, 191, + 192, 255, 9, 10, 13, 32, 33, 34, + 35, 38, 46, 47, 60, 61, 62, 64, + 92, 95, 123, 124, 125, 126, 127, 194, + 195, 198, 199, 203, 204, 205, 206, 207, + 210, 212, 213, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 233, 234, 237, 238, 239, 240, 0, + 36, 37, 45, 48, 57, 58, 63, 65, + 90, 91, 96, 97, 122, 192, 193, 196, + 218, 229, 236, 241, 247, 9, 32, 10, + 61, 10, 38, 46, 42, 47, 46, 69, + 101, 48, 57, 60, 61, 61, 62, 61, + 45, 95, 194, 195, 198, 199, 203, 204, + 205, 206, 207, 210, 212, 213, 214, 215, + 216, 217, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 233, 234, 237, 239, + 240, 243, 48, 57, 65, 90, 97, 122, + 196, 218, 229, 236, 124, 125, 128, 191, + 170, 181, 186, 128, 191, 151, 183, 128, + 255, 192, 255, 0, 127, 173, 130, 133, + 146, 159, 165, 171, 175, 191, 192, 255, + 181, 190, 128, 175, 176, 183, 184, 185, + 186, 191, 134, 139, 141, 162, 128, 135, + 136, 255, 182, 130, 137, 176, 151, 152, + 154, 160, 136, 191, 192, 255, 128, 143, + 144, 170, 171, 175, 176, 178, 179, 191, + 128, 159, 160, 191, 176, 128, 138, 139, + 173, 174, 255, 148, 150, 164, 167, 173, + 176, 185, 189, 190, 192, 255, 144, 128, + 145, 146, 175, 176, 191, 128, 140, 141, + 255, 166, 176, 178, 191, 192, 255, 186, + 128, 137, 138, 170, 171, 179, 180, 181, + 182, 191, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, + 128, 191, 128, 129, 130, 131, 137, 138, + 139, 140, 141, 142, 143, 144, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, - 171, 172, 173, 174, 175, 176, 129, 191, - 192, 255, 158, 159, 128, 157, 160, 191, - 192, 255, 128, 191, 164, 169, 171, 172, - 173, 174, 175, 180, 181, 182, 183, 184, - 185, 187, 188, 189, 190, 191, 128, 163, - 165, 186, 144, 145, 146, 147, 148, 150, - 151, 152, 155, 157, 158, 160, 170, 171, - 172, 175, 128, 159, 161, 169, 173, 191, - 128, 191, 10, 13, 34, 36, 37, 92, - 128, 191, 192, 223, 224, 239, 240, 247, - 248, 255, 10, 13, 34, 92, 36, 37, - 128, 191, 192, 223, 224, 239, 240, 247, - 248, 255, 10, 13, 92, 36, 37, 128, - 191, 192, 223, 224, 239, 240, 247, 248, - 255, 92, 36, 37, 192, 223, 224, 239, - 240, 247, 10, 13, 34, 92, 36, 37, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 182, 183, 184, 188, 189, 190, + 191, 132, 187, 129, 130, 132, 133, 134, + 176, 177, 178, 179, 180, 181, 182, 183, + 128, 191, 128, 129, 130, 131, 132, 133, + 134, 135, 144, 136, 143, 145, 191, 192, + 255, 182, 183, 184, 128, 191, 128, 191, + 191, 128, 190, 192, 255, 128, 146, 147, + 148, 152, 153, 154, 155, 156, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 129, 191, 192, 255, 158, 159, 128, + 157, 160, 191, 192, 255, 128, 191, 164, + 169, 171, 172, 173, 174, 175, 180, 181, + 182, 183, 184, 185, 187, 188, 189, 190, + 191, 128, 163, 165, 186, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 128, 
159, 161, + 169, 173, 191, 128, 191, 10, 13, 34, + 36, 37, 92, 128, 191, 192, 223, 224, + 239, 240, 247, 248, 255, 10, 13, 34, + 36, 37, 92, 128, 191, 192, 223, 224, + 239, 240, 247, 248, 255, 10, 13, 34, + 36, 37, 92, 128, 191, 192, 223, 224, + 239, 240, 247, 248, 255, 10, 13, 34, + 36, 37, 92, 128, 191, 192, 223, 224, + 239, 240, 247, 248, 255, 10, 13, 36, + 37, 92, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 36, 37, 92, 123, + 192, 223, 224, 239, 240, 247, 10, 13, + 34, 36, 37, 92, 123, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 34, 36, 37, 92, 123, 128, 191, 192, 223, 224, 239, 240, 247, 248, 255, - 10, 13, 34, 92, 36, 37, 128, 223, - 224, 239, 240, 247, 248, 255, 10, 13, - 34, 92, 36, 37, 128, 191, 192, 223, - 224, 239, 240, 247, 248, 255, 123, 126, - 123, 126, 128, 191, 128, 191, 128, 191, - 10, 13, 36, 37, 128, 191, 192, 223, - 224, 239, 240, 247, 248, 255, 10, 13, + 10, 13, 34, 36, 37, 92, 123, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 10, 13, 34, 36, 37, 92, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 36, 37, 92, 123, 192, 223, 224, + 239, 240, 247, 10, 13, 34, 36, 37, + 92, 123, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 10, 13, 34, 36, + 37, 92, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 10, 13, 34, 36, + 37, 92, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 10, 13, 34, 36, + 37, 92, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 10, 13, 34, 36, + 37, 92, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 10, 13, 34, 36, + 37, 92, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 10, 13, 34, 36, + 37, 92, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 10, 13, 34, 36, + 37, 92, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 123, 126, 123, 126, + 128, 191, 128, 191, 128, 191, 10, 13, 36, 37, 128, 191, 192, 223, 224, 239, - 240, 247, 248, 255, 126, 126, 128, 191, - 128, 191, 128, 191, 10, 13, 36, 37, + 240, 247, 248, 255, 10, 13, 36, 37, 128, 191, 192, 223, 224, 239, 240, 247, 248, 255, 10, 13, 36, 37, 128, 191, 192, 223, 224, 239, 240, 247, 248, 255, - 126, 126, 128, 191, 128, 191, 128, 191, - 194, 195, 198, 199, 203, 204, 205, 206, - 207, 210, 212, 213, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 228, 233, 234, 237, 238, 239, 240, - 65, 90, 97, 122, 128, 191, 192, 193, - 196, 218, 229, 236, 241, 247, 248, 255, - 45, 95, 194, 195, 198, 199, 203, 204, - 205, 206, 207, 210, 212, 213, 214, 215, - 216, 217, 219, 220, 221, 222, 223, 224, - 225, 226, 227, 228, 233, 234, 237, 239, - 240, 243, 48, 57, 65, 90, 97, 122, - 196, 218, 229, 236, 128, 191, 170, 181, - 186, 128, 191, 151, 183, 128, 255, 192, - 255, 0, 127, 173, 130, 133, 146, 159, - 165, 171, 175, 191, 192, 255, 181, 190, - 128, 175, 176, 183, 184, 185, 186, 191, - 134, 139, 141, 162, 128, 135, 136, 255, - 182, 130, 137, 176, 151, 152, 154, 160, - 136, 191, 192, 255, 128, 143, 144, 170, - 171, 175, 176, 178, 179, 191, 128, 159, - 160, 191, 176, 128, 138, 139, 173, 174, - 255, 148, 150, 164, 167, 173, 176, 185, - 189, 190, 192, 255, 144, 128, 145, 146, - 175, 176, 191, 128, 140, 141, 255, 166, - 176, 178, 191, 192, 255, 186, 128, 137, - 138, 170, 171, 179, 180, 181, 182, 191, - 160, 161, 162, 164, 165, 166, 167, 168, - 169, 170, 171, 172, 173, 174, 175, 176, - 177, 178, 179, 180, 181, 182, 183, 184, - 185, 186, 187, 188, 189, 190, 128, 191, - 128, 129, 130, 131, 137, 138, 139, 140, - 141, 142, 143, 144, 153, 154, 155, 156, - 157, 158, 159, 160, 161, 162, 163, 164, - 165, 166, 167, 168, 169, 170, 171, 172, - 173, 174, 
175, 176, 177, 178, 179, 180, - 182, 183, 184, 188, 189, 190, 191, 132, - 187, 129, 130, 132, 133, 134, 176, 177, - 178, 179, 180, 181, 182, 183, 128, 191, - 128, 129, 130, 131, 132, 133, 134, 135, - 144, 136, 143, 145, 191, 192, 255, 182, - 183, 184, 128, 191, 128, 191, 191, 128, - 190, 192, 255, 128, 146, 147, 148, 152, - 153, 154, 155, 156, 158, 159, 160, 161, + 10, 13, 36, 37, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 126, 126, + 128, 191, 128, 191, 128, 191, 10, 13, + 36, 37, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 10, 13, 36, 37, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 126, 126, 128, 191, 128, 191, + 128, 191, 95, 194, 195, 198, 199, 203, + 204, 205, 206, 207, 210, 212, 213, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 233, 234, 237, + 238, 239, 240, 65, 90, 97, 122, 128, + 191, 192, 193, 196, 218, 229, 236, 241, + 247, 248, 255, 45, 95, 194, 195, 198, + 199, 203, 204, 205, 206, 207, 210, 212, + 213, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 233, + 234, 237, 239, 240, 243, 48, 57, 65, + 90, 97, 122, 196, 218, 229, 236, 128, + 191, 170, 181, 186, 128, 191, 151, 183, + 128, 255, 192, 255, 0, 127, 173, 130, + 133, 146, 159, 165, 171, 175, 191, 192, + 255, 181, 190, 128, 175, 176, 183, 184, + 185, 186, 191, 134, 139, 141, 162, 128, + 135, 136, 255, 182, 130, 137, 176, 151, + 152, 154, 160, 136, 191, 192, 255, 128, + 143, 144, 170, 171, 175, 176, 178, 179, + 191, 128, 159, 160, 191, 176, 128, 138, + 139, 173, 174, 255, 148, 150, 164, 167, + 173, 176, 185, 189, 190, 192, 255, 144, + 128, 145, 146, 175, 176, 191, 128, 140, + 141, 255, 166, 176, 178, 191, 192, 255, + 186, 128, 137, 138, 170, 171, 179, 180, + 181, 182, 191, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 128, 191, 128, 129, 130, 131, 137, + 138, 139, 140, 141, 142, 143, 144, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 172, 173, 174, 175, 176, 129, - 191, 192, 255, 158, 159, 128, 157, 160, - 191, 192, 255, 128, 191, 164, 169, 171, - 172, 173, 174, 175, 180, 181, 182, 183, - 184, 185, 187, 188, 189, 190, 191, 128, - 163, 165, 186, 144, 145, 146, 147, 148, - 150, 151, 152, 155, 157, 158, 160, 170, - 171, 172, 175, 128, 159, 161, 169, 173, - 191, 128, 191, + 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 182, 183, 184, 188, 189, + 190, 191, 132, 187, 129, 130, 132, 133, + 134, 176, 177, 178, 179, 180, 181, 182, + 183, 128, 191, 128, 129, 130, 131, 132, + 133, 134, 135, 144, 136, 143, 145, 191, + 192, 255, 182, 183, 184, 128, 191, 128, + 191, 191, 128, 190, 192, 255, 128, 146, + 147, 148, 152, 153, 154, 155, 156, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 129, 191, 192, 255, 158, 159, + 128, 157, 160, 191, 192, 255, 128, 191, + 164, 169, 171, 172, 173, 174, 175, 180, + 181, 182, 183, 184, 185, 187, 188, 189, + 190, 191, 128, 163, 165, 186, 144, 145, + 146, 147, 148, 150, 151, 152, 155, 157, + 158, 160, 170, 171, 172, 175, 128, 159, + 161, 169, 173, 191, 128, 191, } var _hcltok_single_lengths []byte = []byte{ 0, 1, 1, 1, 2, 3, 2, 0, - 31, 30, 36, 1, 4, 0, 0, 0, + 32, 31, 36, 1, 4, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 0, 1, 1, 0, 0, 2, 0, 0, 0, 1, 32, 0, 0, 0, 0, 1, 3, @@ -1657,88 +1691,90 @@ var _hcltok_single_lengths []byte = []byte{ 4, 1, 5, 2, 0, 3, 2, 2, 2, 1, 7, 0, 7, 17, 3, 0, 2, 0, 3, 0, 0, 1, 0, 2, - 0, 1, 0, 
0, 0, 0, 0, 1, - 1, 0, 0, 0, 1, 1, 1, 1, - 0, 0, 0, 1, 1, 4, 0, 0, - 0, 0, 1, 2, 1, 1, 1, 1, - 0, 1, 1, 0, 0, 2, 0, 0, - 0, 1, 32, 0, 0, 0, 0, 1, - 3, 1, 1, 1, 0, 2, 0, 1, - 1, 2, 0, 3, 0, 1, 0, 2, - 1, 2, 0, 0, 5, 1, 4, 0, - 0, 1, 43, 0, 0, 0, 2, 3, - 2, 1, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 1, 1, 1, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 1, 1, 4, + 0, 0, 0, 0, 1, 2, 1, 1, + 1, 1, 0, 1, 1, 0, 0, 2, + 0, 0, 0, 1, 32, 0, 0, 0, + 0, 1, 3, 1, 1, 1, 0, 2, + 0, 1, 1, 2, 0, 3, 0, 1, + 0, 2, 1, 2, 0, 0, 5, 1, + 4, 0, 0, 1, 43, 0, 0, 0, + 2, 3, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 4, 1, 0, 15, 0, 0, 0, 1, - 6, 1, 0, 0, 1, 0, 2, 0, - 0, 0, 9, 0, 1, 1, 0, 0, - 0, 3, 0, 1, 0, 28, 0, 0, - 0, 1, 0, 1, 0, 0, 0, 1, - 0, 0, 0, 0, 0, 0, 0, 1, - 0, 2, 0, 0, 18, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 16, 36, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 2, - 0, 0, 0, 0, 0, 1, 0, 0, - 0, 0, 0, 0, 0, 28, 0, 0, - 0, 1, 1, 1, 1, 0, 0, 2, - 0, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, 1, 4, 0, 0, - 2, 2, 0, 11, 0, 0, 0, 0, - 0, 0, 0, 1, 1, 3, 0, 0, - 4, 0, 0, 0, 18, 0, 0, 0, - 1, 4, 1, 4, 1, 0, 3, 2, - 2, 2, 1, 0, 0, 1, 8, 0, - 0, 0, 4, 12, 0, 2, 0, 3, - 0, 1, 0, 2, 0, 1, 2, 0, - 0, 3, 0, 1, 1, 1, 2, 2, - 4, 1, 6, 2, 4, 2, 4, 1, - 4, 0, 6, 1, 3, 1, 2, 0, - 2, 11, 1, 1, 1, 0, 1, 1, - 0, 2, 0, 3, 3, 2, 1, 0, - 0, 0, 1, 0, 1, 0, 1, 1, - 0, 2, 0, 0, 1, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 4, 3, 2, 2, 0, 6, 1, 0, - 1, 1, 0, 2, 0, 4, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 0, 1, 0, 3, - 0, 2, 0, 0, 0, 3, 0, 2, - 1, 1, 3, 1, 0, 0, 0, 0, - 0, 5, 2, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 1, 1, 0, 0, - 35, 4, 0, 0, 0, 0, 0, 0, + 0, 0, 4, 1, 0, 15, 0, 0, + 0, 1, 6, 1, 0, 0, 1, 0, + 2, 0, 0, 0, 9, 0, 1, 1, + 0, 0, 0, 3, 0, 1, 0, 28, + 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, - 3, 0, 1, 0, 0, 3, 0, 0, - 1, 0, 0, 0, 0, 28, 0, 0, - 0, 0, 1, 0, 3, 1, 4, 0, - 1, 0, 0, 1, 0, 0, 1, 0, - 0, 0, 0, 1, 1, 0, 7, 0, - 0, 2, 2, 0, 11, 0, 0, 0, - 0, 0, 1, 1, 3, 0, 0, 4, - 0, 0, 0, 12, 1, 4, 1, 5, - 2, 0, 3, 2, 2, 2, 1, 7, - 0, 7, 17, 3, 0, 2, 0, 3, - 0, 0, 1, 0, 2, 0, 53, 2, - 1, 1, 1, 1, 1, 2, 1, 3, - 2, 2, 1, 34, 1, 1, 0, 3, - 2, 0, 0, 0, 1, 2, 4, 1, - 0, 1, 0, 0, 0, 0, 1, 1, - 1, 0, 0, 1, 30, 47, 13, 9, - 3, 0, 1, 28, 2, 0, 18, 16, - 0, 6, 4, 3, 1, 4, 4, 4, - 1, 1, 1, 1, 0, 0, 0, 4, - 2, 1, 1, 0, 0, 0, 4, 2, - 1, 1, 0, 0, 0, 32, 34, 0, + 0, 1, 0, 2, 0, 0, 18, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 16, 36, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 2, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 28, + 0, 0, 0, 1, 1, 1, 1, 0, + 0, 2, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 4, + 0, 0, 2, 2, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 3, + 0, 0, 4, 0, 0, 0, 18, 0, + 0, 0, 1, 4, 1, 4, 1, 0, + 3, 2, 2, 2, 1, 0, 0, 1, + 8, 0, 0, 0, 4, 12, 0, 2, + 0, 3, 0, 1, 0, 2, 0, 1, + 2, 0, 0, 3, 0, 1, 1, 1, + 2, 2, 4, 1, 6, 2, 4, 2, + 4, 1, 4, 0, 6, 1, 3, 1, + 2, 0, 2, 11, 1, 1, 1, 0, + 1, 1, 0, 2, 0, 3, 3, 2, + 1, 0, 0, 0, 1, 0, 1, 0, + 1, 1, 0, 2, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 4, 3, 2, 2, 0, 6, + 1, 0, 1, 1, 0, 2, 0, 4, + 3, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 1, + 0, 3, 0, 2, 0, 0, 0, 3, + 0, 2, 1, 1, 3, 1, 0, 0, + 0, 0, 0, 5, 2, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 1, 1, + 0, 0, 35, 4, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 3, 0, 1, 0, 0, 3, + 0, 0, 1, 0, 0, 0, 0, 28, + 0, 0, 0, 0, 1, 0, 3, 1, + 4, 0, 1, 0, 0, 1, 0, 0, + 1, 0, 0, 0, 0, 1, 1, 0, + 7, 0, 0, 2, 2, 0, 11, 0, + 0, 0, 0, 0, 1, 1, 
3, 0, + 0, 4, 0, 0, 0, 12, 1, 4, + 1, 5, 2, 0, 3, 2, 2, 2, + 1, 7, 0, 7, 17, 3, 0, 2, + 0, 3, 0, 0, 1, 0, 2, 0, + 53, 2, 1, 1, 1, 1, 1, 2, + 3, 2, 2, 1, 34, 1, 1, 0, 3, 2, 0, 0, 0, 1, 2, 4, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 30, 47, 13, 9, 3, 0, 1, 28, 2, 0, 18, - 16, 0, + 16, 0, 6, 6, 6, 6, 5, 4, + 7, 7, 7, 6, 4, 7, 6, 6, + 6, 6, 6, 6, 6, 1, 1, 1, + 1, 0, 0, 0, 4, 4, 4, 4, + 1, 1, 0, 0, 0, 4, 2, 1, + 1, 0, 0, 0, 33, 34, 0, 3, + 2, 0, 0, 0, 1, 2, 4, 1, + 0, 1, 0, 0, 0, 0, 1, 1, + 1, 0, 0, 1, 30, 47, 13, 9, + 3, 0, 1, 28, 2, 0, 18, 16, + 0, } var _hcltok_range_lengths []byte = []byte{ @@ -1858,1546 +1894,1582 @@ var _hcltok_range_lengths []byte = []byte{ 3, 0, 2, 3, 1, 0, 0, 0, 0, 2, 3, 2, 4, 6, 4, 1, 1, 2, 1, 2, 1, 3, 2, 3, - 2, 0, 1, 1, 1, 1, 1, 0, - 0, 1, 1, 1, 0, 0, 0, 0, - 1, 1, 1, 0, 0, 0, 3, 0, - 1, 1, 4, 2, 3, 0, 1, 0, - 2, 2, 4, 2, 2, 3, 1, 1, - 1, 1, 0, 1, 1, 2, 2, 1, - 4, 6, 9, 6, 8, 5, 8, 7, - 10, 4, 6, 4, 7, 7, 5, 5, - 4, 5, 1, 2, 8, 4, 3, 3, - 3, 0, 3, 1, 2, 1, 2, 2, - 3, 3, 1, 3, 2, 2, 1, 2, - 2, 2, 3, 4, 4, 3, 1, 2, - 1, 3, 2, 2, 2, 2, 2, 3, - 3, 1, 1, 2, 1, 3, 2, 2, - 3, 2, 7, 0, 1, 4, 1, 2, - 4, 2, 1, 2, 0, 2, 2, 3, - 5, 5, 1, 4, 1, 1, 2, 2, - 1, 0, 0, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 1, 1, 1, 4, - 2, 2, 3, 1, 4, 4, 6, 1, - 3, 1, 1, 2, 1, 1, 1, 5, - 3, 1, 1, 1, 2, 3, 3, 1, - 2, 2, 1, 4, 1, 2, 5, 2, - 1, 1, 0, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 1, 1, 2, 4, - 2, 1, 2, 2, 2, 6, 1, 1, - 2, 1, 2, 1, 1, 1, 2, 2, - 2, 1, 3, 2, 5, 2, 8, 6, - 2, 2, 2, 2, 3, 1, 3, 1, - 2, 1, 3, 2, 2, 3, 1, 1, - 1, 1, 1, 1, 1, 2, 2, 4, - 1, 2, 1, 0, 1, 1, 1, 1, - 0, 1, 2, 3, 1, 3, 3, 1, - 0, 3, 0, 2, 3, 1, 0, 0, - 0, 0, 2, 2, 2, 2, 1, 5, - 2, 2, 5, 7, 5, 0, 1, 0, - 1, 1, 1, 1, 1, 0, 1, 1, - 1, 2, 2, 3, 3, 4, 7, 5, - 7, 5, 3, 3, 7, 3, 13, 1, - 3, 5, 3, 5, 3, 6, 5, 2, - 2, 8, 4, 1, 2, 3, 2, 10, - 2, 2, 0, 2, 3, 3, 1, 2, - 3, 3, 1, 2, 3, 3, 4, 4, - 2, 1, 2, 2, 3, 2, 2, 5, - 3, 2, 3, 2, 1, 3, 3, 6, - 2, 2, 5, 2, 5, 1, 1, 2, - 4, 1, 11, 1, 3, 8, 4, 2, - 1, 0, 4, 3, 3, 3, 2, 9, - 1, 1, 4, 3, 2, 2, 2, 3, - 4, 2, 3, 2, 4, 3, 2, 2, - 3, 3, 4, 3, 3, 4, 2, 5, - 4, 8, 7, 1, 2, 1, 3, 1, - 2, 5, 1, 2, 2, 2, 2, 1, - 3, 2, 2, 3, 3, 1, 9, 1, - 5, 1, 3, 2, 2, 3, 2, 3, - 3, 3, 1, 3, 3, 2, 2, 4, - 5, 3, 3, 4, 3, 3, 3, 2, - 2, 2, 4, 2, 2, 1, 3, 3, - 3, 3, 3, 3, 2, 2, 3, 2, - 3, 3, 2, 3, 2, 3, 1, 2, - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 3, 2, 3, 2, 3, 5, - 3, 3, 1, 2, 3, 2, 2, 1, - 2, 3, 4, 3, 0, 3, 0, 2, - 3, 1, 0, 0, 0, 0, 2, 3, - 2, 4, 6, 4, 1, 1, 2, 1, - 2, 1, 3, 2, 3, 2, 11, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - 0, 0, 0, 5, 0, 0, 1, 1, - 1, 0, 1, 1, 5, 4, 2, 0, - 1, 0, 2, 2, 5, 2, 3, 5, - 3, 2, 3, 5, 1, 1, 1, 3, - 1, 1, 2, 2, 3, 1, 2, 3, - 1, 5, 6, 6, 4, 5, 5, 6, - 0, 0, 0, 0, 1, 1, 1, 5, - 6, 0, 0, 1, 1, 1, 5, 6, - 0, 0, 1, 1, 1, 8, 5, 1, + 2, 0, 0, 1, 1, 1, 1, 1, + 0, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 0, + 3, 0, 1, 1, 4, 2, 3, 0, + 1, 0, 2, 2, 4, 2, 2, 3, + 1, 1, 1, 1, 0, 1, 1, 2, + 2, 1, 4, 6, 9, 6, 8, 5, + 8, 7, 10, 4, 6, 4, 7, 7, + 5, 5, 4, 5, 1, 2, 8, 4, + 3, 3, 3, 0, 3, 1, 2, 1, + 2, 2, 3, 3, 1, 3, 2, 2, + 1, 2, 2, 2, 3, 4, 4, 3, + 1, 2, 1, 3, 2, 2, 2, 2, + 2, 3, 3, 1, 1, 2, 1, 3, + 2, 2, 3, 2, 7, 0, 1, 4, + 1, 2, 4, 2, 1, 2, 0, 2, + 2, 3, 5, 5, 1, 4, 1, 1, + 2, 2, 1, 0, 0, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 1, 1, + 1, 4, 2, 2, 3, 1, 4, 4, + 6, 1, 3, 1, 1, 2, 1, 1, + 1, 5, 3, 1, 1, 1, 2, 3, + 3, 1, 2, 2, 1, 4, 1, 2, + 5, 2, 1, 1, 0, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 1, 1, + 2, 4, 2, 1, 2, 2, 2, 6, + 1, 1, 2, 1, 2, 1, 1, 1, + 2, 2, 2, 1, 3, 2, 5, 2, + 8, 6, 2, 2, 2, 2, 3, 1, + 3, 1, 2, 1, 3, 2, 2, 3, + 1, 1, 1, 1, 1, 1, 1, 2, + 
2, 4, 1, 2, 1, 0, 1, 1, + 1, 1, 0, 1, 2, 3, 1, 3, + 3, 1, 0, 3, 0, 2, 3, 1, + 0, 0, 0, 0, 2, 2, 2, 2, + 1, 5, 2, 2, 5, 7, 5, 0, + 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 2, 3, 3, 4, + 7, 5, 7, 5, 3, 3, 7, 3, + 13, 1, 3, 5, 3, 5, 3, 6, + 5, 2, 2, 8, 4, 1, 2, 3, + 2, 10, 2, 2, 0, 2, 3, 3, + 1, 2, 3, 3, 1, 2, 3, 3, + 4, 4, 2, 1, 2, 2, 3, 2, + 2, 5, 3, 2, 3, 2, 1, 3, + 3, 6, 2, 2, 5, 2, 5, 1, + 1, 2, 4, 1, 11, 1, 3, 8, + 4, 2, 1, 0, 4, 3, 3, 3, + 2, 9, 1, 1, 4, 3, 2, 2, + 2, 3, 4, 2, 3, 2, 4, 3, + 2, 2, 3, 3, 4, 3, 3, 4, + 2, 5, 4, 8, 7, 1, 2, 1, + 3, 1, 2, 5, 1, 2, 2, 2, + 2, 1, 3, 2, 2, 3, 3, 1, + 9, 1, 5, 1, 3, 2, 2, 3, + 2, 3, 3, 3, 1, 3, 3, 2, + 2, 4, 5, 3, 3, 4, 3, 3, + 3, 2, 2, 2, 4, 2, 2, 1, + 3, 3, 3, 3, 3, 3, 2, 2, + 3, 2, 3, 3, 2, 3, 2, 3, + 1, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 3, 2, 3, 2, + 3, 5, 3, 3, 1, 2, 3, 2, + 2, 1, 2, 3, 4, 3, 0, 3, + 0, 2, 3, 1, 0, 0, 0, 0, + 2, 3, 2, 4, 6, 4, 1, 1, + 2, 1, 2, 1, 3, 2, 3, 2, + 11, 0, 0, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 5, 0, 0, 1, 1, 1, 0, 1, 1, 5, 4, 2, 0, 1, 0, 2, 2, 5, 2, 3, 5, 3, 2, 3, 5, 1, 1, 1, 3, 1, 1, 2, 2, 3, 1, 2, - 3, 1, + 3, 1, 5, 5, 5, 5, 5, 3, + 5, 5, 5, 5, 3, 5, 5, 5, + 5, 5, 5, 5, 5, 0, 0, 0, + 0, 1, 1, 1, 5, 5, 5, 5, + 0, 0, 1, 1, 1, 5, 6, 0, + 0, 1, 1, 1, 8, 5, 1, 1, + 1, 0, 1, 1, 5, 4, 2, 0, + 1, 0, 2, 2, 5, 2, 3, 5, + 3, 2, 3, 5, 1, 1, 1, 3, + 1, 1, 2, 2, 3, 1, 2, 3, + 1, } var _hcltok_index_offsets []int16 = []int16{ 0, 0, 2, 4, 6, 9, 14, 18, - 20, 57, 93, 135, 137, 142, 146, 147, - 149, 151, 157, 162, 167, 169, 172, 174, - 177, 181, 187, 190, 193, 199, 201, 203, - 205, 208, 241, 243, 245, 248, 251, 254, - 262, 270, 281, 289, 298, 306, 315, 324, - 336, 343, 350, 358, 366, 375, 381, 389, - 395, 403, 405, 408, 422, 428, 436, 440, - 444, 446, 493, 495, 498, 500, 505, 511, - 517, 522, 525, 529, 532, 535, 537, 540, - 543, 546, 550, 555, 560, 564, 566, 569, - 571, 575, 578, 581, 584, 587, 591, 596, - 600, 602, 604, 607, 609, 613, 616, 619, - 627, 631, 639, 655, 657, 662, 664, 668, - 679, 683, 685, 688, 690, 693, 698, 702, - 708, 714, 725, 730, 733, 736, 739, 742, - 744, 748, 749, 752, 754, 784, 786, 788, - 791, 795, 798, 802, 804, 806, 808, 814, - 817, 820, 824, 826, 831, 836, 843, 846, - 850, 854, 856, 859, 879, 881, 883, 890, - 894, 896, 898, 900, 903, 907, 911, 913, - 917, 920, 922, 927, 945, 984, 990, 993, - 995, 997, 999, 1002, 1005, 1008, 1011, 1014, - 1018, 1021, 1024, 1027, 1029, 1031, 1034, 1041, - 1044, 1046, 1049, 1052, 1055, 1063, 1065, 1067, - 1070, 1072, 1075, 1077, 1079, 1109, 1112, 1115, - 1118, 1121, 1126, 1130, 1137, 1140, 1149, 1158, - 1161, 1165, 1168, 1171, 1175, 1177, 1181, 1183, - 1186, 1188, 1192, 1196, 1200, 1208, 1210, 1212, - 1216, 1220, 1222, 1235, 1237, 1240, 1243, 1248, - 1250, 1253, 1255, 1257, 1260, 1265, 1267, 1269, - 1274, 1276, 1279, 1283, 1303, 1307, 1311, 1313, - 1315, 1323, 1325, 1332, 1337, 1339, 1343, 1346, - 1349, 1352, 1356, 1359, 1362, 1366, 1376, 1382, - 1385, 1388, 1398, 1418, 1424, 1427, 1429, 1433, - 1435, 1438, 1440, 1444, 1446, 1448, 1452, 1454, - 1458, 1463, 1469, 1471, 1473, 1476, 1478, 1482, - 1489, 1492, 1494, 1497, 1501, 1531, 1536, 1538, - 1541, 1545, 1554, 1559, 1567, 1571, 1579, 1583, - 1591, 1595, 1606, 1608, 1614, 1617, 1625, 1629, - 1634, 1639, 1644, 1646, 1649, 1664, 1668, 1670, - 1673, 1675, 1724, 1727, 1734, 1737, 1739, 1743, - 1747, 1750, 1754, 1756, 1759, 1761, 1763, 1765, - 1767, 1771, 1773, 1775, 1778, 1782, 1796, 1799, - 1803, 1806, 1811, 1822, 1827, 1830, 1860, 1864, - 1867, 1872, 1874, 1878, 1881, 1884, 1886, 1891, - 1893, 1899, 1904, 1910, 1912, 1932, 1940, 
1943, - 1945, 1963, 2001, 2003, 2006, 2008, 2013, 2016, - 2045, 2047, 2049, 2051, 2053, 2056, 2058, 2062, - 2065, 2067, 2070, 2072, 2074, 2077, 2079, 2081, - 2083, 2085, 2087, 2090, 2093, 2096, 2109, 2111, - 2115, 2118, 2120, 2125, 2128, 2142, 2145, 2154, - 2156, 2161, 2165, 2166, 2168, 2170, 2176, 2181, - 2186, 2188, 2191, 2193, 2196, 2200, 2206, 2209, - 2212, 2218, 2220, 2222, 2224, 2227, 2260, 2262, - 2264, 2267, 2270, 2273, 2281, 2289, 2300, 2308, - 2317, 2325, 2334, 2343, 2355, 2362, 2369, 2377, - 2385, 2394, 2400, 2408, 2414, 2422, 2424, 2427, - 2441, 2447, 2455, 2459, 2463, 2465, 2512, 2514, - 2517, 2519, 2524, 2530, 2536, 2541, 2544, 2548, - 2551, 2554, 2556, 2559, 2562, 2565, 2569, 2574, - 2579, 2583, 2585, 2588, 2590, 2594, 2597, 2600, - 2603, 2606, 2610, 2615, 2619, 2621, 2623, 2626, - 2628, 2632, 2635, 2638, 2646, 2650, 2658, 2674, - 2676, 2681, 2683, 2687, 2698, 2702, 2704, 2707, - 2709, 2712, 2717, 2721, 2727, 2733, 2744, 2749, - 2752, 2755, 2758, 2761, 2763, 2767, 2768, 2771, - 2773, 2803, 2805, 2807, 2810, 2814, 2817, 2821, - 2823, 2825, 2827, 2833, 2836, 2839, 2843, 2845, - 2850, 2855, 2862, 2865, 2869, 2873, 2875, 2878, - 2898, 2900, 2902, 2909, 2913, 2915, 2917, 2919, - 2922, 2926, 2930, 2932, 2936, 2939, 2941, 2946, - 2964, 3003, 3009, 3012, 3014, 3016, 3018, 3021, - 3024, 3027, 3030, 3033, 3037, 3040, 3043, 3046, - 3048, 3050, 3053, 3060, 3063, 3065, 3068, 3071, - 3074, 3082, 3084, 3086, 3089, 3091, 3094, 3096, - 3098, 3128, 3131, 3134, 3137, 3140, 3145, 3149, - 3156, 3159, 3168, 3177, 3180, 3184, 3187, 3190, - 3194, 3196, 3200, 3202, 3205, 3207, 3211, 3215, - 3219, 3227, 3229, 3231, 3235, 3239, 3241, 3254, - 3256, 3259, 3262, 3267, 3269, 3272, 3274, 3276, - 3279, 3284, 3286, 3288, 3293, 3295, 3298, 3302, - 3322, 3326, 3330, 3332, 3334, 3342, 3344, 3351, - 3356, 3358, 3362, 3365, 3368, 3371, 3375, 3378, - 3381, 3385, 3395, 3401, 3404, 3407, 3417, 3437, - 3443, 3446, 3448, 3452, 3454, 3457, 3459, 3463, - 3465, 3467, 3471, 3473, 3475, 3481, 3484, 3489, - 3494, 3500, 3510, 3518, 3530, 3537, 3547, 3553, - 3565, 3571, 3589, 3592, 3600, 3606, 3616, 3623, - 3630, 3638, 3646, 3649, 3654, 3674, 3680, 3683, - 3687, 3691, 3695, 3707, 3710, 3715, 3716, 3722, - 3729, 3735, 3738, 3741, 3745, 3749, 3752, 3755, - 3760, 3764, 3770, 3776, 3779, 3783, 3786, 3789, - 3794, 3797, 3800, 3806, 3810, 3813, 3817, 3820, - 3823, 3827, 3831, 3838, 3841, 3844, 3850, 3853, - 3860, 3862, 3864, 3867, 3876, 3881, 3895, 3899, - 3903, 3918, 3924, 3927, 3930, 3932, 3937, 3943, - 3947, 3955, 3961, 3971, 3974, 3977, 3982, 3986, - 3989, 3992, 3995, 3999, 4004, 4008, 4012, 4015, - 4020, 4025, 4028, 4034, 4038, 4044, 4049, 4053, - 4057, 4065, 4068, 4076, 4082, 4092, 4103, 4106, - 4109, 4111, 4115, 4117, 4120, 4131, 4135, 4138, - 4141, 4144, 4147, 4149, 4153, 4157, 4160, 4164, - 4169, 4172, 4182, 4184, 4225, 4231, 4235, 4238, - 4241, 4245, 4248, 4252, 4256, 4261, 4263, 4267, - 4271, 4274, 4277, 4282, 4291, 4295, 4300, 4305, - 4309, 4316, 4320, 4323, 4327, 4330, 4335, 4338, - 4341, 4371, 4375, 4379, 4383, 4387, 4392, 4396, - 4402, 4406, 4414, 4417, 4422, 4426, 4429, 4434, - 4437, 4441, 4444, 4447, 4450, 4453, 4456, 4460, - 4464, 4467, 4477, 4480, 4483, 4488, 4494, 4497, - 4512, 4515, 4519, 4525, 4529, 4533, 4536, 4540, - 4547, 4550, 4553, 4559, 4562, 4566, 4571, 4587, - 4589, 4597, 4599, 4607, 4613, 4615, 4619, 4622, - 4625, 4628, 4632, 4643, 4646, 4658, 4682, 4690, - 4692, 4696, 4699, 4704, 4707, 4709, 4714, 4717, - 4723, 4726, 4728, 4730, 4732, 4734, 4736, 4738, - 4740, 4742, 4744, 4746, 4748, 4750, 4752, 
4754, - 4756, 4758, 4760, 4762, 4764, 4766, 4771, 4775, - 4776, 4778, 4780, 4786, 4791, 4796, 4798, 4801, - 4803, 4806, 4810, 4816, 4819, 4822, 4828, 4830, - 4832, 4834, 4837, 4870, 4872, 4874, 4877, 4880, - 4883, 4891, 4899, 4910, 4918, 4927, 4935, 4944, - 4953, 4965, 4972, 4979, 4987, 4995, 5004, 5010, - 5018, 5024, 5032, 5034, 5037, 5051, 5057, 5065, - 5069, 5073, 5075, 5122, 5124, 5127, 5129, 5134, - 5140, 5146, 5151, 5154, 5158, 5161, 5164, 5166, - 5169, 5172, 5175, 5179, 5184, 5189, 5193, 5195, - 5198, 5200, 5204, 5207, 5210, 5213, 5216, 5220, - 5225, 5229, 5231, 5233, 5236, 5238, 5242, 5245, - 5248, 5256, 5260, 5268, 5284, 5286, 5291, 5293, - 5297, 5308, 5312, 5314, 5317, 5319, 5322, 5327, - 5331, 5337, 5343, 5354, 5359, 5362, 5365, 5368, - 5371, 5373, 5377, 5378, 5381, 5383, 5413, 5415, - 5417, 5420, 5424, 5427, 5431, 5433, 5435, 5437, - 5443, 5446, 5449, 5453, 5455, 5460, 5465, 5472, - 5475, 5479, 5483, 5485, 5488, 5508, 5510, 5512, - 5519, 5523, 5525, 5527, 5529, 5532, 5536, 5540, - 5542, 5546, 5549, 5551, 5556, 5574, 5613, 5619, - 5622, 5624, 5626, 5628, 5631, 5634, 5637, 5640, - 5643, 5647, 5650, 5653, 5656, 5658, 5660, 5663, - 5670, 5673, 5675, 5678, 5681, 5684, 5692, 5694, - 5696, 5699, 5701, 5704, 5706, 5708, 5738, 5741, - 5744, 5747, 5750, 5755, 5759, 5766, 5769, 5778, - 5787, 5790, 5794, 5797, 5800, 5804, 5806, 5810, - 5812, 5815, 5817, 5821, 5825, 5829, 5837, 5839, - 5841, 5845, 5849, 5851, 5864, 5866, 5869, 5872, - 5877, 5879, 5882, 5884, 5886, 5889, 5894, 5896, - 5898, 5903, 5905, 5908, 5912, 5932, 5936, 5940, - 5942, 5944, 5952, 5954, 5961, 5966, 5968, 5972, - 5975, 5978, 5981, 5985, 5988, 5991, 5995, 6005, - 6011, 6014, 6017, 6027, 6047, 6053, 6056, 6058, - 6062, 6064, 6067, 6069, 6073, 6075, 6077, 6081, - 6083, 6085, 6091, 6094, 6099, 6104, 6110, 6120, - 6128, 6140, 6147, 6157, 6163, 6175, 6181, 6199, - 6202, 6210, 6216, 6226, 6233, 6240, 6248, 6256, - 6259, 6264, 6284, 6290, 6293, 6297, 6301, 6305, - 6317, 6320, 6325, 6326, 6332, 6339, 6345, 6348, - 6351, 6355, 6359, 6362, 6365, 6370, 6374, 6380, - 6386, 6389, 6393, 6396, 6399, 6404, 6407, 6410, - 6416, 6420, 6423, 6427, 6430, 6433, 6437, 6441, - 6448, 6451, 6454, 6460, 6463, 6470, 6472, 6474, - 6477, 6486, 6491, 6505, 6509, 6513, 6528, 6534, - 6537, 6540, 6542, 6547, 6553, 6557, 6565, 6571, - 6581, 6584, 6587, 6592, 6596, 6599, 6602, 6605, - 6609, 6614, 6618, 6622, 6625, 6630, 6635, 6638, - 6644, 6648, 6654, 6659, 6663, 6667, 6675, 6678, - 6686, 6692, 6702, 6713, 6716, 6719, 6721, 6725, - 6727, 6730, 6741, 6745, 6748, 6751, 6754, 6757, - 6759, 6763, 6767, 6770, 6774, 6779, 6782, 6792, - 6794, 6835, 6841, 6845, 6848, 6851, 6855, 6858, - 6862, 6866, 6871, 6873, 6877, 6881, 6884, 6887, - 6892, 6901, 6905, 6910, 6915, 6919, 6926, 6930, - 6933, 6937, 6940, 6945, 6948, 6951, 6981, 6985, - 6989, 6993, 6997, 7002, 7006, 7012, 7016, 7024, - 7027, 7032, 7036, 7039, 7044, 7047, 7051, 7054, - 7057, 7060, 7063, 7066, 7070, 7074, 7077, 7087, - 7090, 7093, 7098, 7104, 7107, 7122, 7125, 7129, - 7135, 7139, 7143, 7146, 7150, 7157, 7160, 7163, - 7169, 7172, 7176, 7181, 7197, 7199, 7207, 7209, - 7217, 7223, 7225, 7229, 7232, 7235, 7238, 7242, - 7253, 7256, 7268, 7292, 7300, 7302, 7306, 7309, - 7314, 7317, 7319, 7324, 7327, 7333, 7336, 7401, - 7404, 7406, 7408, 7410, 7412, 7414, 7417, 7419, - 7424, 7427, 7430, 7432, 7472, 7474, 7476, 7478, - 7483, 7487, 7488, 7490, 7492, 7499, 7506, 7513, - 7515, 7517, 7519, 7522, 7525, 7531, 7534, 7539, - 7546, 7551, 7554, 7558, 7565, 7597, 7646, 7661, - 7674, 7679, 7681, 7685, 7716, 7722, 7724, 
7745, - 7765, 7767, 7779, 7790, 7800, 7806, 7816, 7826, - 7837, 7839, 7841, 7843, 7845, 7847, 7849, 7851, - 7861, 7870, 7872, 7874, 7876, 7878, 7880, 7890, - 7899, 7901, 7903, 7905, 7907, 7909, 7950, 7990, - 7992, 7997, 8001, 8002, 8004, 8006, 8013, 8020, - 8027, 8029, 8031, 8033, 8036, 8039, 8045, 8048, - 8053, 8060, 8065, 8068, 8072, 8079, 8111, 8160, - 8175, 8188, 8193, 8195, 8199, 8230, 8236, 8238, - 8259, 8279, + 20, 58, 95, 137, 139, 144, 148, 149, + 151, 153, 159, 164, 169, 171, 174, 176, + 179, 183, 189, 192, 195, 201, 203, 205, + 207, 210, 243, 245, 247, 250, 253, 256, + 264, 272, 283, 291, 300, 308, 317, 326, + 338, 345, 352, 360, 368, 377, 383, 391, + 397, 405, 407, 410, 424, 430, 438, 442, + 446, 448, 495, 497, 500, 502, 507, 513, + 519, 524, 527, 531, 534, 537, 539, 542, + 545, 548, 552, 557, 562, 566, 568, 571, + 573, 577, 580, 583, 586, 589, 593, 598, + 602, 604, 606, 609, 611, 615, 618, 621, + 629, 633, 641, 657, 659, 664, 666, 670, + 681, 685, 687, 690, 692, 695, 700, 704, + 710, 716, 727, 732, 735, 738, 741, 744, + 746, 750, 751, 754, 756, 786, 788, 790, + 793, 797, 800, 804, 806, 808, 810, 816, + 819, 822, 826, 828, 833, 838, 845, 848, + 852, 856, 858, 861, 881, 883, 885, 892, + 896, 898, 900, 902, 905, 909, 913, 915, + 919, 922, 924, 929, 947, 986, 992, 995, + 997, 999, 1001, 1004, 1007, 1010, 1013, 1016, + 1020, 1023, 1026, 1029, 1031, 1033, 1036, 1043, + 1046, 1048, 1051, 1054, 1057, 1065, 1067, 1069, + 1072, 1074, 1077, 1079, 1081, 1111, 1114, 1117, + 1120, 1123, 1128, 1132, 1139, 1142, 1151, 1160, + 1163, 1167, 1170, 1173, 1177, 1179, 1183, 1185, + 1188, 1190, 1194, 1198, 1202, 1210, 1212, 1214, + 1218, 1222, 1224, 1237, 1239, 1242, 1245, 1250, + 1252, 1255, 1257, 1259, 1262, 1267, 1269, 1271, + 1276, 1278, 1281, 1285, 1305, 1309, 1313, 1315, + 1317, 1325, 1327, 1334, 1339, 1341, 1345, 1348, + 1351, 1354, 1358, 1361, 1364, 1368, 1378, 1384, + 1387, 1390, 1400, 1420, 1426, 1429, 1431, 1435, + 1437, 1440, 1442, 1446, 1448, 1450, 1454, 1456, + 1460, 1465, 1471, 1473, 1475, 1478, 1480, 1484, + 1491, 1494, 1496, 1499, 1503, 1533, 1538, 1540, + 1543, 1547, 1556, 1561, 1569, 1573, 1581, 1585, + 1593, 1597, 1608, 1610, 1616, 1619, 1627, 1631, + 1636, 1641, 1646, 1648, 1651, 1666, 1670, 1672, + 1675, 1677, 1726, 1729, 1736, 1739, 1741, 1745, + 1749, 1752, 1756, 1758, 1761, 1763, 1765, 1767, + 1769, 1773, 1775, 1777, 1780, 1784, 1798, 1801, + 1805, 1808, 1813, 1824, 1829, 1832, 1862, 1866, + 1869, 1874, 1876, 1880, 1883, 1886, 1888, 1893, + 1895, 1901, 1906, 1912, 1914, 1934, 1942, 1945, + 1947, 1965, 2003, 2005, 2008, 2010, 2015, 2018, + 2047, 2049, 2051, 2053, 2055, 2058, 2060, 2064, + 2067, 2069, 2072, 2074, 2076, 2079, 2081, 2083, + 2085, 2087, 2089, 2092, 2095, 2098, 2111, 2113, + 2117, 2120, 2122, 2127, 2130, 2144, 2147, 2156, + 2158, 2163, 2167, 2168, 2170, 2172, 2178, 2183, + 2188, 2190, 2193, 2195, 2198, 2202, 2208, 2211, + 2214, 2220, 2222, 2224, 2226, 2229, 2262, 2264, + 2266, 2269, 2272, 2275, 2283, 2291, 2302, 2310, + 2319, 2327, 2336, 2345, 2357, 2364, 2371, 2379, + 2387, 2396, 2402, 2410, 2416, 2424, 2426, 2429, + 2443, 2449, 2457, 2461, 2465, 2467, 2514, 2516, + 2519, 2521, 2526, 2532, 2538, 2543, 2546, 2550, + 2553, 2556, 2558, 2561, 2564, 2567, 2571, 2576, + 2581, 2585, 2587, 2590, 2592, 2596, 2599, 2602, + 2605, 2608, 2612, 2617, 2621, 2623, 2625, 2628, + 2630, 2634, 2637, 2640, 2648, 2652, 2660, 2676, + 2678, 2683, 2685, 2689, 2700, 2704, 2706, 2709, + 2711, 2714, 2719, 2723, 2729, 2735, 2746, 2751, + 2754, 2757, 2760, 2763, 2765, 2769, 2770, 2773, + 2775, 
2805, 2807, 2809, 2812, 2816, 2819, 2823, + 2825, 2827, 2829, 2835, 2838, 2841, 2845, 2847, + 2852, 2857, 2864, 2867, 2871, 2875, 2877, 2880, + 2900, 2902, 2904, 2911, 2915, 2917, 2919, 2921, + 2924, 2928, 2932, 2934, 2938, 2941, 2943, 2948, + 2966, 3005, 3011, 3014, 3016, 3018, 3020, 3023, + 3026, 3029, 3032, 3035, 3039, 3042, 3045, 3048, + 3050, 3052, 3055, 3062, 3065, 3067, 3070, 3073, + 3076, 3084, 3086, 3088, 3091, 3093, 3096, 3098, + 3100, 3130, 3133, 3136, 3139, 3142, 3147, 3151, + 3158, 3161, 3170, 3179, 3182, 3186, 3189, 3192, + 3196, 3198, 3202, 3204, 3207, 3209, 3213, 3217, + 3221, 3229, 3231, 3233, 3237, 3241, 3243, 3256, + 3258, 3261, 3264, 3269, 3271, 3274, 3276, 3278, + 3281, 3286, 3288, 3290, 3295, 3297, 3300, 3304, + 3324, 3328, 3332, 3334, 3336, 3344, 3346, 3353, + 3358, 3360, 3364, 3367, 3370, 3373, 3377, 3380, + 3383, 3387, 3397, 3403, 3406, 3409, 3419, 3439, + 3445, 3448, 3450, 3454, 3456, 3459, 3461, 3465, + 3467, 3469, 3473, 3475, 3477, 3483, 3486, 3491, + 3496, 3502, 3512, 3520, 3532, 3539, 3549, 3555, + 3567, 3573, 3591, 3594, 3602, 3608, 3618, 3625, + 3632, 3640, 3648, 3651, 3656, 3676, 3682, 3685, + 3689, 3693, 3697, 3709, 3712, 3717, 3718, 3724, + 3731, 3737, 3740, 3743, 3747, 3751, 3754, 3757, + 3762, 3766, 3772, 3778, 3781, 3785, 3788, 3791, + 3796, 3799, 3802, 3808, 3812, 3815, 3819, 3822, + 3825, 3829, 3833, 3840, 3843, 3846, 3852, 3855, + 3862, 3864, 3866, 3869, 3878, 3883, 3897, 3901, + 3905, 3920, 3926, 3929, 3932, 3934, 3939, 3945, + 3949, 3957, 3963, 3973, 3976, 3979, 3984, 3988, + 3991, 3994, 3997, 4001, 4006, 4010, 4014, 4017, + 4022, 4027, 4030, 4036, 4040, 4046, 4051, 4055, + 4059, 4067, 4070, 4078, 4084, 4094, 4105, 4108, + 4111, 4113, 4117, 4119, 4122, 4133, 4137, 4140, + 4143, 4146, 4149, 4151, 4155, 4159, 4162, 4166, + 4171, 4174, 4184, 4186, 4227, 4233, 4237, 4240, + 4243, 4247, 4250, 4254, 4258, 4263, 4265, 4269, + 4273, 4276, 4279, 4284, 4293, 4297, 4302, 4307, + 4311, 4318, 4322, 4325, 4329, 4332, 4337, 4340, + 4343, 4373, 4377, 4381, 4385, 4389, 4394, 4398, + 4404, 4408, 4416, 4419, 4424, 4428, 4431, 4436, + 4439, 4443, 4446, 4449, 4452, 4455, 4458, 4462, + 4466, 4469, 4479, 4482, 4485, 4490, 4496, 4499, + 4514, 4517, 4521, 4527, 4531, 4535, 4538, 4542, + 4549, 4552, 4555, 4561, 4564, 4568, 4573, 4589, + 4591, 4599, 4601, 4609, 4615, 4617, 4621, 4624, + 4627, 4630, 4634, 4645, 4648, 4660, 4684, 4692, + 4694, 4698, 4701, 4706, 4709, 4711, 4716, 4719, + 4725, 4728, 4730, 4732, 4734, 4736, 4738, 4740, + 4742, 4744, 4746, 4748, 4750, 4752, 4754, 4756, + 4758, 4760, 4762, 4764, 4766, 4768, 4770, 4772, + 4777, 4781, 4782, 4784, 4786, 4792, 4797, 4802, + 4804, 4807, 4809, 4812, 4816, 4822, 4825, 4828, + 4834, 4836, 4838, 4840, 4843, 4876, 4878, 4880, + 4883, 4886, 4889, 4897, 4905, 4916, 4924, 4933, + 4941, 4950, 4959, 4971, 4978, 4985, 4993, 5001, + 5010, 5016, 5024, 5030, 5038, 5040, 5043, 5057, + 5063, 5071, 5075, 5079, 5081, 5128, 5130, 5133, + 5135, 5140, 5146, 5152, 5157, 5160, 5164, 5167, + 5170, 5172, 5175, 5178, 5181, 5185, 5190, 5195, + 5199, 5201, 5204, 5206, 5210, 5213, 5216, 5219, + 5222, 5226, 5231, 5235, 5237, 5239, 5242, 5244, + 5248, 5251, 5254, 5262, 5266, 5274, 5290, 5292, + 5297, 5299, 5303, 5314, 5318, 5320, 5323, 5325, + 5328, 5333, 5337, 5343, 5349, 5360, 5365, 5368, + 5371, 5374, 5377, 5379, 5383, 5384, 5387, 5389, + 5419, 5421, 5423, 5426, 5430, 5433, 5437, 5439, + 5441, 5443, 5449, 5452, 5455, 5459, 5461, 5466, + 5471, 5478, 5481, 5485, 5489, 5491, 5494, 5514, + 5516, 5518, 5525, 5529, 5531, 5533, 5535, 5538, + 5542, 
5546, 5548, 5552, 5555, 5557, 5562, 5580, + 5619, 5625, 5628, 5630, 5632, 5634, 5637, 5640, + 5643, 5646, 5649, 5653, 5656, 5659, 5662, 5664, + 5666, 5669, 5676, 5679, 5681, 5684, 5687, 5690, + 5698, 5700, 5702, 5705, 5707, 5710, 5712, 5714, + 5744, 5747, 5750, 5753, 5756, 5761, 5765, 5772, + 5775, 5784, 5793, 5796, 5800, 5803, 5806, 5810, + 5812, 5816, 5818, 5821, 5823, 5827, 5831, 5835, + 5843, 5845, 5847, 5851, 5855, 5857, 5870, 5872, + 5875, 5878, 5883, 5885, 5888, 5890, 5892, 5895, + 5900, 5902, 5904, 5909, 5911, 5914, 5918, 5938, + 5942, 5946, 5948, 5950, 5958, 5960, 5967, 5972, + 5974, 5978, 5981, 5984, 5987, 5991, 5994, 5997, + 6001, 6011, 6017, 6020, 6023, 6033, 6053, 6059, + 6062, 6064, 6068, 6070, 6073, 6075, 6079, 6081, + 6083, 6087, 6089, 6091, 6097, 6100, 6105, 6110, + 6116, 6126, 6134, 6146, 6153, 6163, 6169, 6181, + 6187, 6205, 6208, 6216, 6222, 6232, 6239, 6246, + 6254, 6262, 6265, 6270, 6290, 6296, 6299, 6303, + 6307, 6311, 6323, 6326, 6331, 6332, 6338, 6345, + 6351, 6354, 6357, 6361, 6365, 6368, 6371, 6376, + 6380, 6386, 6392, 6395, 6399, 6402, 6405, 6410, + 6413, 6416, 6422, 6426, 6429, 6433, 6436, 6439, + 6443, 6447, 6454, 6457, 6460, 6466, 6469, 6476, + 6478, 6480, 6483, 6492, 6497, 6511, 6515, 6519, + 6534, 6540, 6543, 6546, 6548, 6553, 6559, 6563, + 6571, 6577, 6587, 6590, 6593, 6598, 6602, 6605, + 6608, 6611, 6615, 6620, 6624, 6628, 6631, 6636, + 6641, 6644, 6650, 6654, 6660, 6665, 6669, 6673, + 6681, 6684, 6692, 6698, 6708, 6719, 6722, 6725, + 6727, 6731, 6733, 6736, 6747, 6751, 6754, 6757, + 6760, 6763, 6765, 6769, 6773, 6776, 6780, 6785, + 6788, 6798, 6800, 6841, 6847, 6851, 6854, 6857, + 6861, 6864, 6868, 6872, 6877, 6879, 6883, 6887, + 6890, 6893, 6898, 6907, 6911, 6916, 6921, 6925, + 6932, 6936, 6939, 6943, 6946, 6951, 6954, 6957, + 6987, 6991, 6995, 6999, 7003, 7008, 7012, 7018, + 7022, 7030, 7033, 7038, 7042, 7045, 7050, 7053, + 7057, 7060, 7063, 7066, 7069, 7072, 7076, 7080, + 7083, 7093, 7096, 7099, 7104, 7110, 7113, 7128, + 7131, 7135, 7141, 7145, 7149, 7152, 7156, 7163, + 7166, 7169, 7175, 7178, 7182, 7187, 7203, 7205, + 7213, 7215, 7223, 7229, 7231, 7235, 7238, 7241, + 7244, 7248, 7259, 7262, 7274, 7298, 7306, 7308, + 7312, 7315, 7320, 7323, 7325, 7330, 7333, 7339, + 7342, 7407, 7410, 7412, 7414, 7416, 7418, 7420, + 7423, 7428, 7431, 7434, 7436, 7476, 7478, 7480, + 7482, 7487, 7491, 7492, 7494, 7496, 7503, 7510, + 7517, 7519, 7521, 7523, 7526, 7529, 7535, 7538, + 7543, 7550, 7555, 7558, 7562, 7569, 7601, 7650, + 7665, 7678, 7683, 7685, 7689, 7720, 7726, 7728, + 7749, 7769, 7771, 7783, 7795, 7807, 7819, 7830, + 7838, 7851, 7864, 7877, 7889, 7897, 7910, 7922, + 7934, 7946, 7958, 7970, 7982, 7994, 7996, 7998, + 8000, 8002, 8004, 8006, 8008, 8018, 8028, 8038, + 8048, 8050, 8052, 8054, 8056, 8058, 8068, 8077, + 8079, 8081, 8083, 8085, 8087, 8129, 8169, 8171, + 8176, 8180, 8181, 8183, 8185, 8192, 8199, 8206, + 8208, 8210, 8212, 8215, 8218, 8224, 8227, 8232, + 8239, 8244, 8247, 8251, 8258, 8290, 8339, 8354, + 8367, 8372, 8374, 8378, 8409, 8415, 8417, 8438, + 8458, } var _hcltok_indicies []int16 = []int16{ - 2, 1, 4, 3, 6, 5, 6, 7, - 5, 9, 11, 11, 10, 8, 12, 12, - 10, 8, 10, 8, 13, 15, 16, 18, - 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 42, 43, - 44, 45, 46, 14, 14, 17, 17, 41, - 3, 15, 16, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, - 39, 40, 42, 43, 44, 45, 46, 14, - 14, 17, 17, 41, 3, 47, 48, 14, - 14, 49, 16, 18, 19, 20, 19, 50, - 51, 23, 52, 25, 26, 53, 54, 55, - 
56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 40, 42, 66, 44, 67, 68, - 69, 14, 14, 14, 17, 41, 3, 47, - 3, 14, 14, 14, 14, 3, 14, 14, - 14, 3, 14, 3, 14, 14, 3, 3, - 3, 3, 3, 3, 14, 3, 3, 3, - 3, 14, 14, 14, 14, 14, 3, 3, - 14, 3, 3, 14, 3, 14, 3, 3, - 14, 3, 3, 3, 14, 14, 14, 14, - 14, 14, 3, 14, 14, 3, 14, 14, - 3, 3, 3, 3, 3, 3, 14, 14, - 3, 3, 14, 3, 14, 14, 14, 3, - 70, 71, 72, 73, 17, 74, 75, 76, - 77, 78, 79, 80, 81, 82, 83, 84, - 85, 86, 87, 88, 89, 90, 91, 92, - 93, 94, 95, 96, 97, 98, 99, 100, - 3, 14, 3, 14, 3, 14, 14, 3, - 14, 14, 3, 3, 3, 14, 3, 3, - 3, 3, 3, 3, 3, 14, 3, 3, - 3, 3, 3, 3, 3, 14, 14, 14, - 14, 14, 14, 14, 14, 14, 14, 14, - 3, 3, 3, 3, 3, 3, 3, 3, - 14, 14, 14, 14, 14, 14, 14, 14, - 14, 3, 3, 3, 3, 3, 3, 3, - 3, 14, 14, 14, 14, 14, 14, 14, - 14, 14, 3, 14, 14, 14, 14, 14, - 14, 14, 14, 3, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 14, 14, 3, - 14, 14, 14, 14, 14, 14, 3, 14, - 14, 14, 14, 14, 14, 3, 3, 3, - 3, 3, 3, 3, 3, 14, 14, 14, - 14, 14, 14, 14, 14, 3, 14, 14, - 14, 14, 14, 14, 14, 14, 3, 14, - 14, 14, 14, 14, 3, 3, 3, 3, - 3, 3, 3, 3, 14, 14, 14, 14, - 14, 14, 3, 14, 14, 14, 14, 14, - 14, 14, 3, 14, 3, 14, 14, 3, - 14, 14, 14, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 3, 14, 14, - 14, 14, 14, 3, 14, 14, 14, 14, - 14, 14, 14, 3, 14, 14, 14, 3, - 14, 14, 14, 3, 14, 3, 101, 102, - 103, 104, 105, 106, 107, 108, 109, 110, - 111, 112, 113, 114, 115, 116, 117, 19, - 118, 119, 120, 121, 122, 123, 124, 125, - 126, 127, 128, 129, 130, 131, 132, 133, - 134, 135, 17, 18, 136, 137, 138, 139, - 140, 17, 19, 17, 3, 14, 3, 14, - 14, 3, 3, 14, 3, 3, 3, 3, - 14, 3, 3, 3, 3, 3, 14, 3, - 3, 3, 3, 3, 14, 14, 14, 14, - 14, 3, 3, 3, 14, 3, 3, 3, - 14, 14, 14, 3, 3, 3, 14, 14, - 3, 3, 3, 14, 14, 14, 3, 3, - 3, 14, 14, 14, 14, 3, 14, 14, - 14, 14, 3, 3, 3, 3, 3, 14, - 14, 14, 14, 3, 3, 14, 14, 14, - 3, 3, 14, 14, 14, 14, 3, 14, - 14, 3, 14, 14, 3, 3, 3, 14, - 14, 14, 3, 3, 3, 3, 14, 14, - 14, 14, 14, 3, 3, 3, 3, 14, - 3, 14, 14, 3, 14, 14, 3, 14, - 3, 14, 14, 14, 3, 14, 14, 3, - 3, 3, 14, 3, 3, 3, 3, 3, - 3, 3, 14, 14, 14, 14, 3, 14, - 14, 14, 14, 14, 14, 14, 3, 141, - 142, 143, 144, 145, 146, 147, 148, 149, - 17, 150, 151, 152, 153, 154, 3, 14, - 3, 3, 3, 3, 3, 14, 14, 3, - 14, 14, 14, 3, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 14, 3, 14, - 14, 14, 3, 3, 14, 14, 14, 3, - 3, 14, 3, 3, 14, 14, 14, 14, - 14, 3, 3, 3, 3, 14, 14, 14, - 14, 14, 14, 3, 14, 14, 14, 14, - 14, 3, 155, 112, 156, 157, 158, 17, - 159, 160, 19, 17, 3, 14, 14, 14, - 14, 3, 3, 3, 14, 3, 3, 14, - 14, 14, 3, 3, 3, 14, 14, 3, - 122, 3, 19, 17, 17, 161, 3, 17, - 3, 14, 19, 162, 163, 19, 164, 165, - 19, 60, 166, 167, 168, 169, 170, 19, - 171, 172, 173, 19, 174, 175, 176, 18, - 177, 178, 179, 18, 180, 19, 17, 3, - 3, 14, 14, 3, 3, 3, 14, 14, - 14, 14, 3, 14, 14, 3, 3, 3, - 3, 14, 14, 3, 3, 14, 14, 3, - 3, 3, 3, 3, 3, 14, 14, 14, - 3, 3, 3, 14, 3, 3, 3, 14, - 14, 3, 14, 14, 14, 14, 3, 14, - 14, 14, 14, 3, 14, 14, 14, 14, - 14, 14, 3, 3, 3, 14, 14, 14, - 14, 3, 181, 182, 3, 17, 3, 14, - 3, 3, 14, 19, 183, 184, 185, 186, - 60, 187, 188, 58, 189, 190, 191, 192, - 193, 194, 195, 196, 197, 17, 3, 3, - 14, 3, 14, 14, 14, 14, 14, 14, - 14, 3, 14, 14, 14, 3, 14, 3, - 3, 14, 3, 14, 3, 3, 14, 14, - 14, 14, 3, 14, 14, 14, 3, 3, - 14, 14, 14, 14, 3, 14, 14, 3, - 3, 14, 14, 14, 14, 14, 3, 198, - 199, 200, 201, 202, 203, 204, 205, 206, - 207, 208, 204, 209, 210, 211, 212, 41, - 3, 213, 214, 19, 215, 216, 217, 218, - 219, 220, 221, 222, 223, 19, 17, 224, - 225, 226, 227, 19, 228, 229, 230, 231, - 232, 233, 234, 235, 236, 237, 238, 239, - 240, 
241, 242, 19, 147, 17, 243, 3, - 14, 14, 14, 14, 14, 3, 3, 3, - 14, 3, 14, 14, 3, 14, 3, 14, - 14, 3, 3, 3, 14, 14, 14, 3, - 3, 3, 14, 14, 14, 3, 3, 3, - 3, 14, 3, 3, 14, 3, 3, 14, - 14, 14, 3, 3, 14, 3, 14, 14, - 14, 3, 14, 14, 14, 14, 14, 14, - 3, 3, 3, 14, 14, 3, 14, 14, - 3, 14, 14, 3, 14, 14, 3, 14, - 14, 14, 14, 14, 14, 14, 3, 14, - 3, 14, 3, 14, 14, 3, 14, 3, - 14, 14, 3, 14, 3, 14, 3, 244, - 215, 245, 246, 247, 248, 249, 250, 251, - 252, 253, 101, 254, 19, 255, 256, 257, - 19, 258, 132, 259, 260, 261, 262, 263, - 264, 265, 266, 19, 3, 3, 3, 14, - 14, 14, 3, 14, 14, 3, 14, 14, - 3, 3, 3, 3, 3, 14, 14, 14, - 14, 3, 14, 14, 14, 14, 14, 14, - 3, 3, 3, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 3, 14, 14, 14, - 14, 14, 14, 14, 14, 3, 14, 14, - 3, 3, 3, 3, 14, 14, 14, 3, - 3, 3, 14, 3, 3, 3, 14, 14, - 3, 14, 14, 14, 3, 14, 3, 3, - 3, 14, 14, 3, 14, 14, 14, 3, - 14, 14, 14, 3, 3, 3, 3, 14, - 19, 184, 267, 268, 17, 19, 17, 3, - 3, 14, 3, 14, 19, 267, 17, 3, - 19, 269, 17, 3, 3, 14, 19, 270, - 271, 272, 175, 273, 274, 19, 275, 276, - 277, 17, 3, 3, 14, 14, 14, 3, - 14, 14, 3, 14, 14, 14, 14, 3, - 3, 14, 3, 3, 14, 14, 3, 14, - 3, 19, 17, 3, 278, 19, 279, 3, - 17, 3, 14, 3, 14, 280, 19, 281, - 282, 3, 14, 3, 3, 3, 14, 14, - 14, 14, 3, 283, 284, 285, 19, 286, - 287, 288, 289, 290, 291, 292, 293, 294, - 295, 296, 297, 298, 299, 17, 3, 14, - 14, 14, 3, 3, 3, 3, 14, 14, - 3, 3, 14, 3, 3, 3, 3, 3, - 3, 3, 14, 3, 14, 3, 3, 3, - 3, 3, 3, 14, 14, 14, 14, 14, - 3, 3, 14, 3, 3, 3, 14, 3, - 3, 14, 3, 3, 14, 3, 3, 14, - 3, 3, 3, 14, 14, 14, 3, 3, - 3, 14, 14, 14, 14, 3, 300, 19, - 301, 19, 302, 303, 304, 305, 17, 3, - 14, 14, 14, 14, 14, 3, 3, 3, - 14, 3, 3, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 3, 14, 14, - 14, 14, 14, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 14, 14, 14, - 14, 3, 14, 14, 14, 14, 14, 3, - 306, 19, 17, 3, 14, 307, 19, 103, - 17, 3, 14, 308, 3, 17, 3, 14, - 19, 309, 17, 3, 3, 14, 310, 3, - 19, 311, 17, 3, 3, 14, 14, 14, - 14, 3, 14, 14, 14, 14, 3, 14, - 14, 14, 14, 14, 3, 3, 14, 3, - 14, 14, 14, 3, 14, 3, 14, 14, - 14, 3, 3, 3, 3, 3, 3, 3, - 14, 14, 14, 3, 14, 3, 3, 3, - 14, 14, 14, 14, 3, 312, 313, 72, - 314, 315, 316, 317, 318, 319, 320, 321, - 322, 323, 324, 325, 326, 327, 328, 329, - 330, 331, 332, 334, 335, 336, 337, 338, - 339, 333, 3, 14, 14, 14, 14, 3, - 14, 3, 14, 14, 3, 14, 14, 14, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 14, 14, 14, 14, 14, 3, 14, - 14, 14, 14, 14, 14, 14, 3, 14, - 14, 14, 3, 14, 14, 14, 14, 14, - 14, 14, 3, 14, 14, 14, 3, 14, - 14, 14, 14, 14, 14, 14, 3, 14, - 14, 14, 3, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 3, 14, 3, - 14, 14, 14, 14, 14, 3, 14, 14, - 3, 14, 14, 14, 14, 14, 14, 14, - 3, 14, 14, 14, 3, 14, 14, 14, - 14, 3, 14, 14, 14, 14, 3, 14, - 14, 14, 14, 3, 14, 3, 14, 14, - 3, 14, 14, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 14, 14, 3, - 14, 14, 14, 3, 14, 3, 14, 14, - 3, 14, 3, 340, 341, 342, 104, 105, - 106, 107, 108, 343, 110, 111, 112, 113, - 114, 115, 344, 345, 170, 346, 261, 120, - 347, 122, 232, 272, 125, 348, 349, 350, - 351, 352, 353, 354, 355, 356, 357, 134, - 358, 19, 17, 18, 19, 137, 138, 139, - 140, 17, 17, 3, 14, 14, 3, 14, - 14, 14, 14, 14, 14, 3, 3, 3, - 14, 3, 14, 14, 14, 14, 3, 14, - 14, 14, 3, 14, 14, 3, 14, 14, - 14, 3, 3, 14, 14, 14, 3, 3, - 14, 14, 3, 14, 3, 14, 3, 14, - 14, 14, 3, 3, 14, 14, 3, 14, - 14, 3, 14, 14, 14, 3, 359, 143, - 145, 146, 147, 148, 149, 17, 360, 151, - 361, 153, 362, 3, 14, 14, 3, 3, - 3, 3, 14, 3, 3, 14, 14, 14, - 14, 14, 3, 363, 112, 364, 157, 158, - 17, 159, 160, 19, 17, 3, 14, 14, - 14, 14, 3, 3, 3, 14, 19, 162, 
- 163, 19, 365, 366, 222, 311, 166, 167, - 168, 367, 170, 368, 369, 370, 371, 372, - 373, 374, 375, 376, 377, 178, 179, 18, - 378, 19, 17, 3, 3, 3, 3, 14, - 14, 14, 3, 3, 3, 3, 3, 14, - 14, 3, 14, 14, 14, 3, 14, 14, - 3, 3, 3, 14, 14, 3, 14, 14, - 14, 14, 3, 14, 3, 14, 14, 14, - 14, 14, 3, 3, 3, 3, 3, 14, - 14, 14, 14, 14, 14, 3, 14, 3, - 19, 183, 184, 379, 186, 60, 187, 188, - 58, 189, 190, 380, 17, 193, 381, 195, - 196, 197, 17, 3, 14, 14, 14, 14, - 14, 14, 14, 3, 14, 14, 3, 14, - 3, 382, 383, 200, 201, 202, 384, 204, - 205, 385, 386, 387, 204, 209, 210, 211, - 212, 41, 3, 213, 214, 19, 215, 216, - 218, 388, 220, 389, 222, 223, 19, 17, - 390, 225, 226, 227, 19, 228, 229, 230, - 231, 232, 233, 234, 235, 391, 237, 238, - 392, 240, 241, 242, 19, 147, 17, 243, - 3, 3, 14, 3, 3, 14, 3, 14, - 14, 14, 14, 14, 3, 14, 14, 3, - 393, 394, 395, 396, 397, 398, 399, 400, - 250, 401, 322, 402, 216, 403, 404, 405, - 406, 407, 404, 408, 409, 410, 261, 411, - 263, 412, 413, 274, 3, 14, 3, 14, - 3, 14, 3, 14, 3, 14, 14, 3, - 14, 3, 14, 14, 14, 3, 14, 14, - 3, 3, 14, 14, 14, 3, 14, 3, - 14, 3, 14, 14, 3, 14, 3, 14, - 3, 14, 3, 14, 3, 14, 3, 3, - 3, 14, 14, 14, 3, 14, 14, 3, - 19, 270, 232, 414, 404, 415, 274, 19, - 416, 417, 277, 17, 3, 14, 3, 14, - 14, 14, 3, 3, 3, 14, 14, 3, - 280, 19, 281, 418, 3, 14, 14, 3, - 19, 286, 287, 288, 289, 290, 291, 292, - 293, 294, 295, 419, 17, 3, 3, 3, - 14, 19, 420, 19, 268, 303, 304, 305, - 17, 3, 3, 14, 422, 422, 422, 422, - 421, 422, 422, 422, 421, 422, 421, 422, - 422, 421, 421, 421, 421, 421, 421, 422, - 421, 421, 421, 421, 422, 422, 422, 422, - 422, 421, 421, 422, 421, 421, 422, 421, - 422, 421, 421, 422, 421, 421, 421, 422, - 422, 422, 422, 422, 422, 421, 422, 422, - 421, 422, 422, 421, 421, 421, 421, 421, - 421, 422, 422, 421, 421, 422, 421, 422, - 422, 422, 421, 423, 424, 425, 426, 427, - 428, 429, 430, 431, 432, 433, 434, 435, - 436, 437, 438, 439, 440, 441, 442, 443, - 444, 445, 446, 447, 448, 449, 450, 451, - 452, 453, 454, 421, 422, 421, 422, 421, - 422, 422, 421, 422, 422, 421, 421, 421, - 422, 421, 421, 421, 421, 421, 421, 421, - 422, 421, 421, 421, 421, 421, 421, 421, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 421, 421, 421, 421, 421, - 421, 421, 421, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 421, 421, 421, 421, - 421, 421, 421, 421, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 421, 422, 422, - 422, 422, 422, 422, 422, 422, 421, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 421, 422, 422, 422, 422, 422, - 422, 421, 422, 422, 422, 422, 422, 422, + 2, 1, 4, 3, 6, 5, 6, 2, + 5, 8, 10, 10, 9, 7, 11, 11, + 9, 7, 9, 7, 12, 13, 14, 15, + 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 41, + 42, 43, 44, 45, 13, 13, 16, 16, + 40, 3, 13, 14, 15, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 41, 42, 43, 44, + 45, 13, 13, 16, 16, 40, 3, 46, + 47, 13, 13, 48, 15, 17, 18, 19, + 18, 49, 50, 22, 51, 24, 25, 52, + 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 39, 41, 65, 43, + 66, 67, 68, 13, 13, 13, 16, 40, + 3, 46, 3, 13, 13, 13, 13, 3, + 13, 13, 13, 3, 13, 3, 13, 3, + 13, 3, 3, 3, 3, 3, 13, 3, + 3, 3, 3, 13, 13, 13, 13, 13, + 3, 3, 13, 3, 3, 13, 3, 13, + 3, 3, 13, 3, 3, 3, 13, 13, + 13, 13, 13, 13, 3, 13, 13, 3, + 13, 13, 3, 3, 3, 3, 3, 3, + 13, 13, 3, 3, 13, 3, 13, 13, + 13, 3, 69, 70, 71, 72, 16, 73, + 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 3, 13, 3, 13, 3, 13, 
+ 13, 3, 13, 13, 3, 3, 3, 13, + 3, 3, 3, 3, 3, 3, 3, 13, + 3, 3, 3, 3, 3, 3, 3, 13, + 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 3, 3, 3, 3, 3, 3, + 3, 3, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 3, 3, 3, 3, 3, + 3, 3, 3, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 3, 13, 13, 13, + 13, 13, 13, 13, 13, 3, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, + 13, 3, 13, 13, 13, 13, 13, 13, + 3, 13, 13, 13, 13, 13, 13, 3, + 3, 3, 3, 3, 3, 3, 3, 13, + 13, 13, 13, 13, 13, 13, 13, 3, + 13, 13, 13, 13, 13, 13, 13, 13, + 3, 13, 13, 13, 13, 13, 3, 3, + 3, 3, 3, 3, 3, 3, 13, 13, + 13, 13, 13, 13, 3, 13, 13, 13, + 13, 13, 13, 13, 3, 13, 3, 13, + 13, 3, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 3, + 13, 13, 13, 13, 13, 3, 13, 13, + 13, 13, 13, 13, 13, 3, 13, 13, + 13, 3, 13, 13, 13, 3, 13, 3, + 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, + 116, 18, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 16, 17, 135, 136, + 137, 138, 139, 16, 18, 16, 3, 13, + 3, 13, 13, 3, 3, 13, 3, 3, + 3, 3, 13, 3, 3, 3, 3, 3, + 13, 3, 3, 3, 3, 3, 13, 13, + 13, 13, 13, 3, 3, 3, 13, 3, + 3, 3, 13, 13, 13, 3, 3, 3, + 13, 13, 3, 3, 3, 13, 13, 13, + 3, 3, 3, 13, 13, 13, 13, 3, + 13, 13, 13, 13, 3, 3, 3, 3, + 3, 13, 13, 13, 13, 3, 3, 13, + 13, 13, 3, 3, 13, 13, 13, 13, + 3, 13, 13, 3, 13, 13, 3, 3, + 3, 13, 13, 13, 3, 3, 3, 3, + 13, 13, 13, 13, 13, 3, 3, 3, + 3, 13, 3, 13, 13, 3, 13, 13, + 3, 13, 3, 13, 13, 13, 3, 13, + 13, 3, 3, 3, 13, 3, 3, 3, + 3, 3, 3, 3, 13, 13, 13, 13, + 3, 13, 13, 13, 13, 13, 13, 13, + 3, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 16, 149, 150, 151, 152, 153, + 3, 13, 3, 3, 3, 3, 3, 13, + 13, 3, 13, 13, 13, 3, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, + 3, 13, 13, 13, 3, 3, 13, 13, + 13, 3, 3, 13, 3, 3, 13, 13, + 13, 13, 13, 3, 3, 3, 3, 13, + 13, 13, 13, 13, 13, 3, 13, 13, + 13, 13, 13, 3, 154, 111, 155, 156, + 157, 16, 158, 159, 18, 16, 3, 13, + 13, 13, 13, 3, 3, 3, 13, 3, + 3, 13, 13, 13, 3, 3, 3, 13, + 13, 3, 121, 3, 18, 16, 16, 160, + 3, 16, 3, 13, 18, 161, 162, 18, + 163, 164, 18, 59, 165, 166, 167, 168, + 169, 18, 170, 171, 172, 18, 173, 174, + 175, 17, 176, 177, 178, 17, 179, 18, + 16, 3, 3, 13, 13, 3, 3, 3, + 13, 13, 13, 13, 3, 13, 13, 3, + 3, 3, 3, 13, 13, 3, 3, 13, + 13, 3, 3, 3, 3, 3, 3, 13, + 13, 13, 3, 3, 3, 13, 3, 3, + 3, 13, 13, 3, 13, 13, 13, 13, + 3, 13, 13, 13, 13, 3, 13, 13, + 13, 13, 13, 13, 3, 3, 3, 13, + 13, 13, 13, 3, 180, 181, 3, 16, + 3, 13, 3, 3, 13, 18, 182, 183, + 184, 185, 59, 186, 187, 57, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 16, + 3, 3, 13, 3, 13, 13, 13, 13, + 13, 13, 13, 3, 13, 13, 13, 3, + 13, 3, 3, 13, 3, 13, 3, 3, + 13, 13, 13, 13, 3, 13, 13, 13, + 3, 3, 13, 13, 13, 13, 3, 13, + 13, 3, 3, 13, 13, 13, 13, 13, + 3, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 203, 208, 209, 210, + 211, 40, 3, 212, 213, 18, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 18, + 16, 223, 224, 225, 226, 18, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 18, 146, 16, + 242, 3, 13, 13, 13, 13, 13, 3, + 3, 3, 13, 3, 13, 13, 3, 13, + 3, 13, 13, 3, 3, 3, 13, 13, + 13, 3, 3, 3, 13, 13, 13, 3, + 3, 3, 3, 13, 3, 3, 13, 3, + 3, 13, 13, 13, 3, 3, 13, 3, + 13, 13, 13, 3, 13, 13, 13, 13, + 13, 13, 3, 3, 3, 13, 13, 3, + 13, 13, 3, 13, 13, 3, 13, 13, + 3, 13, 13, 13, 13, 13, 13, 13, + 3, 13, 3, 13, 3, 13, 13, 3, + 13, 3, 13, 13, 3, 13, 3, 13, + 3, 243, 214, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 100, 253, 18, 254, + 255, 256, 18, 257, 131, 258, 259, 260, + 261, 262, 
263, 264, 265, 18, 3, 3, + 3, 13, 13, 13, 3, 13, 13, 3, + 13, 13, 3, 3, 3, 3, 3, 13, + 13, 13, 13, 3, 13, 13, 13, 13, + 13, 13, 3, 3, 3, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 3, 13, + 13, 13, 13, 13, 13, 13, 13, 3, + 13, 13, 3, 3, 3, 3, 13, 13, + 13, 3, 3, 3, 13, 3, 3, 3, + 13, 13, 3, 13, 13, 13, 3, 13, + 3, 3, 3, 13, 13, 3, 13, 13, + 13, 3, 13, 13, 13, 3, 3, 3, + 3, 13, 18, 183, 266, 267, 16, 18, + 16, 3, 3, 13, 3, 13, 18, 266, + 16, 3, 18, 268, 16, 3, 3, 13, + 18, 269, 270, 271, 174, 272, 273, 18, + 274, 275, 276, 16, 3, 3, 13, 13, + 13, 3, 13, 13, 3, 13, 13, 13, + 13, 3, 3, 13, 3, 3, 13, 13, + 3, 13, 3, 18, 16, 3, 277, 18, + 278, 3, 16, 3, 13, 3, 13, 279, + 18, 280, 281, 3, 13, 3, 3, 3, + 13, 13, 13, 13, 3, 282, 283, 284, + 18, 285, 286, 287, 288, 289, 290, 291, + 292, 293, 294, 295, 296, 297, 298, 16, + 3, 13, 13, 13, 3, 3, 3, 3, + 13, 13, 3, 3, 13, 3, 3, 3, + 3, 3, 3, 3, 13, 3, 13, 3, + 3, 3, 3, 3, 3, 13, 13, 13, + 13, 13, 3, 3, 13, 3, 3, 3, + 13, 3, 3, 13, 3, 3, 13, 3, + 3, 13, 3, 3, 3, 13, 13, 13, + 3, 3, 3, 13, 13, 13, 13, 3, + 299, 18, 300, 18, 301, 302, 303, 304, + 16, 3, 13, 13, 13, 13, 13, 3, + 3, 3, 13, 3, 3, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 3, + 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 3, 13, 13, 13, 13, + 13, 3, 305, 18, 16, 3, 13, 306, + 18, 102, 16, 3, 13, 307, 3, 16, + 3, 13, 18, 308, 16, 3, 3, 13, + 309, 3, 18, 310, 16, 3, 3, 13, + 13, 13, 13, 3, 13, 13, 13, 13, + 3, 13, 13, 13, 13, 13, 3, 3, + 13, 3, 13, 13, 13, 3, 13, 3, + 13, 13, 13, 3, 3, 3, 3, 3, + 3, 3, 13, 13, 13, 3, 13, 3, + 3, 3, 13, 13, 13, 13, 3, 311, + 312, 71, 313, 314, 315, 316, 317, 318, + 319, 320, 321, 322, 323, 324, 325, 326, + 327, 328, 329, 330, 331, 333, 334, 335, + 336, 337, 338, 332, 3, 13, 13, 13, + 13, 3, 13, 3, 13, 13, 3, 13, + 13, 13, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 13, 13, 13, 13, 13, + 3, 13, 13, 13, 13, 13, 13, 13, + 3, 13, 13, 13, 3, 13, 13, 13, + 13, 13, 13, 13, 3, 13, 13, 13, + 3, 13, 13, 13, 13, 13, 13, 13, + 3, 13, 13, 13, 3, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 3, + 13, 3, 13, 13, 13, 13, 13, 3, + 13, 13, 3, 13, 13, 13, 13, 13, + 13, 13, 3, 13, 13, 13, 3, 13, + 13, 13, 13, 3, 13, 13, 13, 13, + 3, 13, 13, 13, 13, 3, 13, 3, + 13, 13, 3, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, + 13, 3, 13, 13, 13, 3, 13, 3, + 13, 13, 3, 13, 3, 339, 340, 341, + 103, 104, 105, 106, 107, 342, 109, 110, + 111, 112, 113, 114, 343, 344, 169, 345, + 260, 119, 346, 121, 231, 271, 124, 347, + 348, 349, 350, 351, 352, 353, 354, 355, + 356, 133, 357, 18, 16, 17, 18, 136, + 137, 138, 139, 16, 16, 3, 13, 13, + 3, 13, 13, 13, 13, 13, 13, 3, + 3, 3, 13, 3, 13, 13, 13, 13, + 3, 13, 13, 13, 3, 13, 13, 3, + 13, 13, 13, 3, 3, 13, 13, 13, + 3, 3, 13, 13, 3, 13, 3, 13, + 3, 13, 13, 13, 3, 3, 13, 13, + 3, 13, 13, 3, 13, 13, 13, 3, + 358, 142, 144, 145, 146, 147, 148, 16, + 359, 150, 360, 152, 361, 3, 13, 13, + 3, 3, 3, 3, 13, 3, 3, 13, + 13, 13, 13, 13, 3, 362, 111, 363, + 156, 157, 16, 158, 159, 18, 16, 3, + 13, 13, 13, 13, 3, 3, 3, 13, + 18, 161, 162, 18, 364, 365, 221, 310, + 165, 166, 167, 366, 169, 367, 368, 369, + 370, 371, 372, 373, 374, 375, 376, 177, + 178, 17, 377, 18, 16, 3, 3, 3, + 3, 13, 13, 13, 3, 3, 3, 3, + 3, 13, 13, 3, 13, 13, 13, 3, + 13, 13, 3, 3, 3, 13, 13, 3, + 13, 13, 13, 13, 3, 13, 3, 13, + 13, 13, 13, 13, 3, 3, 3, 3, + 3, 13, 13, 13, 13, 13, 13, 3, + 13, 3, 18, 182, 183, 378, 185, 59, + 186, 187, 57, 188, 189, 379, 16, 192, + 380, 194, 195, 196, 16, 3, 13, 13, + 13, 13, 13, 13, 13, 3, 13, 13, + 3, 13, 3, 381, 382, 199, 200, 201, + 383, 203, 
204, 384, 385, 386, 203, 208, + 209, 210, 211, 40, 3, 212, 213, 18, + 214, 215, 217, 387, 219, 388, 221, 222, + 18, 16, 389, 224, 225, 226, 18, 227, + 228, 229, 230, 231, 232, 233, 234, 390, + 236, 237, 391, 239, 240, 241, 18, 146, + 16, 242, 3, 3, 13, 3, 3, 13, + 3, 13, 13, 13, 13, 13, 3, 13, + 13, 3, 392, 393, 394, 395, 396, 397, + 398, 399, 249, 400, 321, 401, 215, 402, + 403, 404, 405, 406, 403, 407, 408, 409, + 260, 410, 262, 411, 412, 273, 3, 13, + 3, 13, 3, 13, 3, 13, 3, 13, + 13, 3, 13, 3, 13, 13, 13, 3, + 13, 13, 3, 3, 13, 13, 13, 3, + 13, 3, 13, 3, 13, 13, 3, 13, + 3, 13, 3, 13, 3, 13, 3, 13, + 3, 3, 3, 13, 13, 13, 3, 13, + 13, 3, 18, 269, 231, 413, 403, 414, + 273, 18, 415, 416, 276, 16, 3, 13, + 3, 13, 13, 13, 3, 3, 3, 13, + 13, 3, 279, 18, 280, 417, 3, 13, + 13, 3, 18, 285, 286, 287, 288, 289, + 290, 291, 292, 293, 294, 418, 16, 3, + 3, 3, 13, 18, 419, 18, 267, 302, + 303, 304, 16, 3, 3, 13, 421, 421, + 421, 421, 420, 421, 421, 421, 420, 421, + 420, 421, 421, 420, 420, 420, 420, 420, + 420, 421, 420, 420, 420, 420, 421, 421, + 421, 421, 421, 420, 420, 421, 420, 420, + 421, 420, 421, 420, 420, 421, 420, 420, + 420, 421, 421, 421, 421, 421, 421, 420, + 421, 421, 420, 421, 421, 420, 420, 420, + 420, 420, 420, 421, 421, 420, 420, 421, + 420, 421, 421, 421, 420, 422, 423, 424, + 425, 426, 427, 428, 429, 430, 431, 432, + 433, 434, 435, 436, 437, 438, 439, 440, + 441, 442, 443, 444, 445, 446, 447, 448, + 449, 450, 451, 452, 453, 420, 421, 420, + 421, 420, 421, 421, 420, 421, 421, 420, + 420, 420, 421, 420, 420, 420, 420, 420, + 420, 420, 421, 420, 420, 420, 420, 420, + 420, 420, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 420, 420, 420, + 420, 420, 420, 420, 420, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 420, 420, + 420, 420, 420, 420, 420, 420, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 420, + 421, 421, 421, 421, 421, 421, 421, 421, + 420, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 420, 421, 421, 421, + 421, 421, 421, 420, 421, 421, 421, 421, + 421, 421, 420, 420, 420, 420, 420, 420, + 420, 420, 421, 421, 421, 421, 421, 421, + 421, 421, 420, 421, 421, 421, 421, 421, + 421, 421, 421, 420, 421, 421, 421, 421, + 421, 420, 420, 420, 420, 420, 420, 420, + 420, 421, 421, 421, 421, 421, 421, 420, + 421, 421, 421, 421, 421, 421, 421, 420, + 421, 420, 421, 421, 420, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 420, 421, 421, 421, 421, 421, + 420, 421, 421, 421, 421, 421, 421, 421, + 420, 421, 421, 421, 420, 421, 421, 421, + 420, 421, 420, 454, 455, 456, 457, 458, + 459, 460, 461, 462, 463, 464, 465, 466, + 467, 468, 469, 470, 471, 472, 473, 474, + 475, 476, 477, 478, 479, 480, 481, 482, + 483, 484, 485, 486, 487, 488, 489, 426, + 490, 491, 492, 493, 494, 495, 426, 471, + 426, 420, 421, 420, 421, 421, 420, 420, + 421, 420, 420, 420, 420, 421, 420, 420, + 420, 420, 420, 421, 420, 420, 420, 420, + 420, 421, 421, 421, 421, 421, 420, 420, + 420, 421, 420, 420, 420, 421, 421, 421, + 420, 420, 420, 421, 421, 420, 420, 420, + 421, 421, 421, 420, 420, 420, 421, 421, + 421, 421, 420, 421, 421, 421, 421, 420, + 420, 420, 420, 420, 421, 421, 421, 421, + 420, 420, 421, 421, 421, 420, 420, 421, + 421, 421, 421, 420, 421, 421, 420, 421, + 421, 420, 420, 420, 421, 421, 421, 420, + 420, 420, 420, 421, 421, 421, 421, 421, + 420, 420, 420, 420, 421, 420, 421, 421, + 420, 421, 421, 420, 421, 420, 421, 421, + 421, 420, 421, 421, 420, 420, 420, 421, + 420, 420, 420, 420, 420, 420, 420, 421, + 421, 421, 421, 420, 421, 421, 421, 421, + 421, 421, 421, 420, 
496, 497, 498, 499, + 500, 501, 502, 503, 504, 426, 505, 506, + 507, 508, 509, 420, 421, 420, 420, 420, + 420, 420, 421, 421, 420, 421, 421, 421, + 420, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 420, 421, 421, 421, 420, + 420, 421, 421, 421, 420, 420, 421, 420, + 420, 421, 421, 421, 421, 421, 420, 420, + 420, 420, 421, 421, 421, 421, 421, 421, + 420, 421, 421, 421, 421, 421, 420, 510, + 465, 511, 512, 513, 426, 514, 515, 471, + 426, 420, 421, 421, 421, 421, 420, 420, + 420, 421, 420, 420, 421, 421, 421, 420, + 420, 420, 421, 421, 420, 476, 420, 471, + 426, 426, 516, 420, 426, 420, 421, 471, + 517, 518, 471, 519, 520, 471, 521, 522, + 523, 524, 525, 526, 471, 527, 528, 529, + 471, 530, 531, 532, 490, 533, 534, 535, + 490, 536, 471, 426, 420, 420, 421, 421, + 420, 420, 420, 421, 421, 421, 421, 420, + 421, 421, 420, 420, 420, 420, 421, 421, + 420, 420, 421, 421, 420, 420, 420, 420, + 420, 420, 421, 421, 421, 420, 420, 420, + 421, 420, 420, 420, 421, 421, 420, 421, + 421, 421, 421, 420, 421, 421, 421, 421, + 420, 421, 421, 421, 421, 421, 421, 420, + 420, 420, 421, 421, 421, 421, 420, 537, + 538, 420, 426, 420, 421, 420, 420, 421, + 471, 539, 540, 541, 542, 521, 543, 544, + 545, 546, 547, 548, 549, 550, 551, 552, + 553, 554, 426, 420, 420, 421, 420, 421, + 421, 421, 421, 421, 421, 421, 420, 421, + 421, 421, 420, 421, 420, 420, 421, 420, + 421, 420, 420, 421, 421, 421, 421, 420, + 421, 421, 421, 420, 420, 421, 421, 421, + 421, 420, 421, 421, 420, 420, 421, 421, + 421, 421, 421, 420, 555, 556, 557, 558, + 559, 560, 561, 562, 563, 564, 565, 561, + 567, 568, 569, 570, 566, 420, 571, 572, + 471, 573, 574, 575, 576, 577, 578, 579, + 580, 581, 471, 426, 582, 583, 584, 585, + 471, 586, 587, 588, 589, 590, 591, 592, + 593, 594, 595, 596, 597, 598, 599, 600, + 471, 502, 426, 601, 420, 421, 421, 421, + 421, 421, 420, 420, 420, 421, 420, 421, + 421, 420, 421, 420, 421, 421, 420, 420, + 420, 421, 421, 421, 420, 420, 420, 421, + 421, 421, 420, 420, 420, 420, 421, 420, + 420, 421, 420, 420, 421, 421, 421, 420, + 420, 421, 420, 421, 421, 421, 420, 421, + 421, 421, 421, 421, 421, 420, 420, 420, + 421, 421, 420, 421, 421, 420, 421, 421, + 420, 421, 421, 420, 421, 421, 421, 421, + 421, 421, 421, 420, 421, 420, 421, 420, + 421, 421, 420, 421, 420, 421, 421, 420, + 421, 420, 421, 420, 602, 573, 603, 604, + 605, 606, 607, 608, 609, 610, 611, 454, + 612, 471, 613, 614, 615, 471, 616, 486, + 617, 618, 619, 620, 621, 622, 623, 624, + 471, 420, 420, 420, 421, 421, 421, 420, + 421, 421, 420, 421, 421, 420, 420, 420, + 420, 420, 421, 421, 421, 421, 420, 421, + 421, 421, 421, 421, 421, 420, 420, 420, 421, 421, 421, 421, 421, 421, 421, 421, - 422, 422, 422, 422, 422, 422, 422, 422, - 421, 422, 422, 422, 422, 422, 422, 422, - 422, 421, 422, 422, 422, 422, 422, 421, - 421, 421, 421, 421, 421, 421, 421, 422, - 422, 422, 422, 422, 422, 421, 422, 422, - 422, 422, 422, 422, 422, 421, 422, 421, - 422, 422, 421, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 421, 422, 422, 422, 422, 422, 421, 422, - 422, 422, 422, 422, 422, 422, 421, 422, - 422, 422, 421, 422, 422, 422, 421, 422, - 421, 455, 456, 457, 458, 459, 460, 461, - 462, 463, 464, 465, 466, 467, 468, 469, - 470, 471, 472, 473, 474, 475, 476, 477, - 478, 479, 480, 481, 482, 483, 484, 485, - 486, 487, 488, 489, 490, 427, 491, 492, - 493, 494, 495, 496, 427, 472, 427, 421, - 422, 421, 422, 422, 421, 421, 422, 421, - 421, 421, 421, 422, 421, 421, 421, 421, - 421, 422, 421, 421, 421, 421, 421, 422, - 422, 422, 422, 422, 421, 421, 421, 422, - 421, 
421, 421, 422, 422, 422, 421, 421, - 421, 422, 422, 421, 421, 421, 422, 422, - 422, 421, 421, 421, 422, 422, 422, 422, - 421, 422, 422, 422, 422, 421, 421, 421, - 421, 421, 422, 422, 422, 422, 421, 421, - 422, 422, 422, 421, 421, 422, 422, 422, - 422, 421, 422, 422, 421, 422, 422, 421, - 421, 421, 422, 422, 422, 421, 421, 421, - 421, 422, 422, 422, 422, 422, 421, 421, - 421, 421, 422, 421, 422, 422, 421, 422, - 422, 421, 422, 421, 422, 422, 422, 421, - 422, 422, 421, 421, 421, 422, 421, 421, - 421, 421, 421, 421, 421, 422, 422, 422, - 422, 421, 422, 422, 422, 422, 422, 422, - 422, 421, 497, 498, 499, 500, 501, 502, - 503, 504, 505, 427, 506, 507, 508, 509, - 510, 421, 422, 421, 421, 421, 421, 421, - 422, 422, 421, 422, 422, 422, 421, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 421, 422, 422, 422, 421, 421, 422, - 422, 422, 421, 421, 422, 421, 421, 422, - 422, 422, 422, 422, 421, 421, 421, 421, - 422, 422, 422, 422, 422, 422, 421, 422, - 422, 422, 422, 422, 421, 511, 466, 512, - 513, 514, 427, 515, 516, 472, 427, 421, - 422, 422, 422, 422, 421, 421, 421, 422, - 421, 421, 422, 422, 422, 421, 421, 421, - 422, 422, 421, 477, 421, 472, 427, 427, - 517, 421, 427, 421, 422, 472, 518, 519, - 472, 520, 521, 472, 522, 523, 524, 525, - 526, 527, 472, 528, 529, 530, 472, 531, - 532, 533, 491, 534, 535, 536, 491, 537, - 472, 427, 421, 421, 422, 422, 421, 421, - 421, 422, 422, 422, 422, 421, 422, 422, - 421, 421, 421, 421, 422, 422, 421, 421, - 422, 422, 421, 421, 421, 421, 421, 421, - 422, 422, 422, 421, 421, 421, 422, 421, - 421, 421, 422, 422, 421, 422, 422, 422, - 422, 421, 422, 422, 422, 422, 421, 422, - 422, 422, 422, 422, 422, 421, 421, 421, - 422, 422, 422, 422, 421, 538, 539, 421, - 427, 421, 422, 421, 421, 422, 472, 540, - 541, 542, 543, 522, 544, 545, 546, 547, - 548, 549, 550, 551, 552, 553, 554, 555, - 427, 421, 421, 422, 421, 422, 422, 422, - 422, 422, 422, 422, 421, 422, 422, 422, - 421, 422, 421, 421, 422, 421, 422, 421, - 421, 422, 422, 422, 422, 421, 422, 422, - 422, 421, 421, 422, 422, 422, 422, 421, - 422, 422, 421, 421, 422, 422, 422, 422, - 422, 421, 556, 557, 558, 559, 560, 561, - 562, 563, 564, 565, 566, 562, 568, 569, - 570, 571, 567, 421, 572, 573, 472, 574, - 575, 576, 577, 578, 579, 580, 581, 582, - 472, 427, 583, 584, 585, 586, 472, 587, - 588, 589, 590, 591, 592, 593, 594, 595, - 596, 597, 598, 599, 600, 601, 472, 503, - 427, 602, 421, 422, 422, 422, 422, 422, - 421, 421, 421, 422, 421, 422, 422, 421, - 422, 421, 422, 422, 421, 421, 421, 422, - 422, 422, 421, 421, 421, 422, 422, 422, - 421, 421, 421, 421, 422, 421, 421, 422, - 421, 421, 422, 422, 422, 421, 421, 422, - 421, 422, 422, 422, 421, 422, 422, 422, - 422, 422, 422, 421, 421, 421, 422, 422, - 421, 422, 422, 421, 422, 422, 421, 422, - 422, 421, 422, 422, 422, 422, 422, 422, - 422, 421, 422, 421, 422, 421, 422, 422, - 421, 422, 421, 422, 422, 421, 422, 421, - 422, 421, 603, 574, 604, 605, 606, 607, - 608, 609, 610, 611, 612, 455, 613, 472, - 614, 615, 616, 472, 617, 487, 618, 619, - 620, 621, 622, 623, 624, 625, 472, 421, - 421, 421, 422, 422, 422, 421, 422, 422, - 421, 422, 422, 421, 421, 421, 421, 421, - 422, 422, 422, 422, 421, 422, 422, 422, - 422, 422, 422, 421, 421, 421, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 421, - 422, 422, 422, 422, 422, 422, 422, 422, - 421, 422, 422, 421, 421, 421, 421, 422, - 422, 422, 421, 421, 421, 422, 421, 421, - 421, 422, 422, 421, 422, 422, 422, 421, - 422, 421, 421, 421, 422, 422, 421, 422, - 422, 422, 421, 422, 422, 422, 421, 421, - 421, 421, 422, 472, 541, 626, 
627, 427, - 472, 427, 421, 421, 422, 421, 422, 472, - 626, 427, 421, 472, 628, 427, 421, 421, - 422, 472, 629, 630, 631, 532, 632, 633, - 472, 634, 635, 636, 427, 421, 421, 422, - 422, 422, 421, 422, 422, 421, 422, 422, - 422, 422, 421, 421, 422, 421, 421, 422, - 422, 421, 422, 421, 472, 427, 421, 637, - 472, 638, 421, 427, 421, 422, 421, 422, - 639, 472, 640, 641, 421, 422, 421, 421, - 421, 422, 422, 422, 422, 421, 642, 643, - 644, 472, 645, 646, 647, 648, 649, 650, - 651, 652, 653, 654, 655, 656, 657, 658, - 427, 421, 422, 422, 422, 421, 421, 421, - 421, 422, 422, 421, 421, 422, 421, 421, - 421, 421, 421, 421, 421, 422, 421, 422, - 421, 421, 421, 421, 421, 421, 422, 422, - 422, 422, 422, 421, 421, 422, 421, 421, - 421, 422, 421, 421, 422, 421, 421, 422, - 421, 421, 422, 421, 421, 421, 422, 422, - 422, 421, 421, 421, 422, 422, 422, 422, - 421, 659, 472, 660, 472, 661, 662, 663, - 664, 427, 421, 422, 422, 422, 422, 422, - 421, 421, 421, 422, 421, 421, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 421, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 421, 422, 422, 422, - 422, 422, 421, 665, 472, 427, 421, 422, - 666, 472, 457, 427, 421, 422, 667, 421, - 427, 421, 422, 472, 668, 427, 421, 421, - 422, 669, 421, 472, 670, 427, 421, 421, - 422, 672, 671, 422, 422, 422, 422, 672, - 671, 422, 672, 671, 672, 672, 422, 672, - 671, 422, 672, 422, 672, 671, 422, 672, - 422, 672, 422, 671, 672, 672, 672, 672, - 672, 672, 672, 672, 671, 422, 422, 672, - 672, 422, 672, 422, 672, 671, 672, 672, - 672, 672, 672, 422, 672, 422, 672, 422, - 672, 671, 672, 672, 422, 672, 422, 672, - 671, 672, 672, 672, 672, 672, 422, 672, - 422, 672, 671, 422, 422, 672, 422, 672, - 671, 672, 672, 672, 422, 672, 422, 672, - 422, 672, 422, 672, 671, 672, 422, 672, - 422, 672, 671, 422, 672, 672, 672, 672, - 422, 672, 422, 672, 422, 672, 422, 672, - 422, 672, 422, 672, 671, 422, 672, 671, - 672, 672, 672, 422, 672, 422, 672, 671, - 672, 422, 672, 422, 672, 671, 422, 672, - 672, 672, 672, 422, 672, 422, 672, 671, - 422, 672, 422, 672, 422, 672, 671, 672, - 672, 422, 672, 422, 672, 671, 422, 672, - 422, 672, 422, 672, 422, 671, 672, 672, - 672, 422, 672, 422, 672, 671, 422, 672, - 671, 672, 672, 422, 672, 671, 672, 672, - 672, 422, 672, 672, 672, 672, 672, 672, - 422, 422, 672, 422, 672, 422, 672, 422, - 672, 671, 672, 422, 672, 422, 672, 671, - 422, 672, 671, 672, 422, 672, 671, 672, - 422, 672, 671, 422, 422, 672, 671, 422, - 672, 422, 672, 422, 672, 422, 672, 422, - 672, 422, 671, 672, 672, 422, 672, 672, - 672, 672, 422, 422, 672, 672, 672, 672, - 672, 422, 672, 672, 672, 672, 672, 671, - 422, 672, 672, 422, 672, 422, 671, 672, - 672, 422, 672, 671, 422, 422, 672, 422, - 671, 672, 672, 671, 422, 672, 422, 671, - 672, 671, 422, 672, 422, 672, 422, 671, - 672, 672, 671, 422, 672, 422, 672, 422, - 672, 671, 672, 422, 672, 422, 672, 671, - 422, 672, 671, 422, 422, 672, 671, 672, - 422, 671, 672, 671, 422, 672, 422, 672, - 422, 671, 672, 671, 422, 422, 672, 671, - 672, 422, 672, 422, 672, 671, 422, 672, - 422, 671, 672, 671, 422, 422, 672, 422, - 671, 672, 671, 422, 422, 672, 671, 672, - 422, 672, 671, 672, 422, 672, 671, 672, - 422, 672, 422, 672, 422, 671, 672, 671, - 422, 422, 672, 671, 672, 422, 672, 422, - 672, 671, 422, 672, 671, 672, 672, 422, - 672, 422, 672, 671, 671, 422, 671, 422, - 672, 672, 422, 672, 672, 672, 672, 672, - 672, 672, 671, 422, 672, 672, 672, 422, - 671, 672, 672, 672, 422, 672, 422, 672, - 422, 672, 422, 672, 422, 672, 671, 422, - 422, 672, 671, 
672, 422, 672, 671, 422, - 422, 672, 422, 422, 422, 672, 422, 672, - 422, 672, 422, 672, 422, 671, 422, 672, - 422, 672, 422, 671, 672, 671, 422, 672, - 422, 671, 672, 422, 672, 672, 672, 671, - 422, 672, 422, 422, 672, 422, 671, 672, - 672, 671, 422, 672, 672, 672, 672, 422, - 672, 422, 671, 672, 672, 672, 422, 672, - 671, 672, 422, 672, 422, 672, 422, 672, - 422, 672, 671, 672, 672, 422, 672, 671, - 422, 672, 422, 672, 422, 671, 672, 672, - 671, 422, 672, 422, 671, 672, 671, 422, - 672, 671, 422, 672, 422, 672, 671, 672, - 672, 672, 671, 422, 422, 422, 672, 671, - 422, 672, 422, 671, 672, 671, 422, 672, - 422, 672, 422, 671, 672, 672, 672, 671, - 422, 672, 422, 671, 672, 672, 672, 672, - 671, 422, 672, 422, 672, 671, 422, 422, - 672, 422, 672, 671, 672, 422, 672, 422, - 671, 672, 672, 671, 422, 672, 422, 672, - 671, 422, 672, 672, 672, 422, 672, 422, - 671, 422, 672, 671, 672, 422, 422, 672, - 422, 672, 422, 671, 672, 672, 672, 672, - 671, 422, 672, 422, 672, 422, 672, 422, - 672, 422, 672, 671, 672, 672, 672, 422, - 672, 422, 672, 422, 672, 422, 671, 672, - 672, 422, 422, 672, 671, 672, 422, 672, - 672, 671, 422, 672, 422, 672, 671, 422, - 422, 672, 672, 672, 672, 422, 672, 422, - 672, 422, 671, 672, 672, 422, 671, 672, - 671, 422, 672, 422, 671, 672, 671, 422, - 672, 422, 671, 672, 422, 672, 672, 671, - 422, 672, 672, 422, 671, 672, 671, 422, - 672, 422, 672, 671, 672, 422, 672, 422, - 671, 672, 671, 422, 672, 422, 672, 422, - 672, 422, 672, 422, 672, 671, 673, 671, - 674, 675, 676, 677, 678, 679, 680, 681, - 682, 683, 684, 676, 685, 686, 687, 688, - 689, 676, 690, 691, 692, 693, 694, 695, - 696, 697, 698, 699, 700, 701, 702, 703, - 704, 676, 705, 673, 685, 673, 706, 673, - 671, 672, 672, 672, 672, 422, 671, 672, - 672, 671, 422, 672, 671, 422, 422, 672, - 671, 422, 672, 422, 671, 672, 671, 422, - 422, 672, 422, 671, 672, 672, 671, 422, - 672, 672, 672, 671, 422, 672, 422, 672, - 672, 671, 422, 422, 672, 422, 671, 672, - 671, 422, 672, 671, 422, 422, 672, 422, - 672, 671, 422, 672, 422, 422, 672, 422, - 672, 422, 671, 672, 672, 671, 422, 672, - 672, 422, 672, 671, 422, 672, 422, 672, - 671, 422, 672, 422, 671, 422, 672, 672, - 672, 422, 672, 671, 672, 422, 672, 671, - 422, 672, 671, 672, 422, 672, 671, 422, - 672, 671, 422, 672, 422, 672, 671, 422, - 672, 671, 422, 672, 671, 707, 708, 709, - 710, 711, 712, 713, 714, 715, 716, 717, - 718, 678, 719, 720, 721, 722, 723, 720, - 724, 725, 726, 727, 728, 729, 730, 731, - 732, 673, 671, 672, 422, 672, 671, 672, - 422, 672, 671, 672, 422, 672, 671, 672, - 422, 672, 671, 422, 672, 422, 672, 671, - 672, 422, 672, 671, 672, 422, 422, 422, - 672, 671, 672, 422, 672, 671, 672, 672, - 672, 672, 422, 672, 422, 671, 672, 671, - 422, 422, 672, 422, 672, 671, 672, 422, - 672, 671, 422, 672, 671, 672, 672, 422, - 672, 671, 422, 672, 671, 672, 422, 672, - 671, 422, 672, 671, 422, 672, 671, 422, - 672, 671, 672, 671, 422, 422, 672, 671, - 672, 422, 672, 671, 422, 672, 422, 671, - 672, 671, 422, 676, 733, 673, 676, 734, - 676, 735, 685, 673, 671, 672, 671, 422, - 672, 671, 422, 676, 734, 685, 673, 671, - 676, 736, 673, 685, 673, 671, 672, 671, - 422, 676, 737, 694, 738, 720, 739, 732, - 676, 740, 741, 742, 673, 685, 673, 671, - 672, 671, 422, 672, 422, 672, 671, 422, - 672, 422, 672, 422, 671, 672, 672, 671, - 422, 672, 422, 672, 671, 422, 672, 671, - 676, 685, 427, 671, 743, 676, 744, 685, - 673, 671, 427, 672, 671, 422, 672, 671, - 422, 745, 676, 746, 747, 673, 671, 422, - 672, 671, 672, 672, 671, 422, 422, 672, - 422, 672, 671, 676, 748, 749, 750, 751, - 
752, 753, 754, 755, 756, 757, 758, 673, - 685, 673, 671, 672, 422, 672, 672, 672, - 672, 672, 672, 672, 422, 672, 422, 672, - 672, 672, 672, 672, 672, 671, 422, 672, - 672, 422, 672, 422, 671, 672, 422, 672, - 672, 672, 422, 672, 672, 422, 672, 672, - 422, 672, 672, 422, 672, 672, 671, 422, - 676, 759, 676, 735, 760, 761, 762, 673, - 685, 673, 671, 672, 671, 422, 672, 672, - 672, 422, 672, 672, 672, 422, 672, 422, - 672, 671, 422, 422, 422, 422, 672, 672, - 422, 422, 422, 422, 422, 672, 672, 672, - 672, 672, 672, 672, 422, 672, 422, 672, - 422, 671, 672, 672, 672, 422, 672, 422, - 672, 671, 685, 427, 763, 676, 685, 427, - 672, 671, 422, 764, 676, 765, 685, 427, - 672, 671, 422, 672, 422, 766, 685, 673, - 671, 427, 672, 671, 422, 676, 767, 673, - 685, 673, 671, 672, 671, 422, 768, 769, - 769, 768, 770, 768, 771, 768, 769, 772, - 773, 772, 775, 774, 776, 777, 777, 774, - 778, 774, 779, 776, 780, 777, 781, 777, - 783, 782, 784, 785, 785, 782, 786, 782, - 787, 784, 788, 785, 789, 785, 791, 791, - 791, 791, 790, 791, 791, 791, 790, 791, - 790, 791, 791, 790, 790, 790, 790, 790, - 790, 791, 790, 790, 790, 790, 791, 791, - 791, 791, 791, 790, 790, 791, 790, 790, - 791, 790, 791, 790, 790, 791, 790, 790, - 790, 791, 791, 791, 791, 791, 791, 790, - 791, 791, 790, 791, 791, 790, 790, 790, - 790, 790, 790, 791, 791, 790, 790, 791, - 790, 791, 791, 791, 790, 793, 794, 795, - 796, 797, 798, 799, 800, 801, 802, 803, - 804, 805, 806, 807, 808, 809, 810, 811, - 812, 813, 814, 815, 816, 817, 818, 819, - 820, 821, 822, 823, 824, 790, 791, 790, - 791, 790, 791, 791, 790, 791, 791, 790, - 790, 790, 791, 790, 790, 790, 790, 790, - 790, 790, 791, 790, 790, 790, 790, 790, - 790, 790, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 790, 790, 790, - 790, 790, 790, 790, 790, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 790, 790, - 790, 790, 790, 790, 790, 790, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 790, - 791, 791, 791, 791, 791, 791, 791, 791, - 790, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 790, 791, 791, 791, - 791, 791, 791, 790, 791, 791, 791, 791, - 791, 791, 790, 790, 790, 790, 790, 790, - 790, 790, 791, 791, 791, 791, 791, 791, - 791, 791, 790, 791, 791, 791, 791, 791, - 791, 791, 791, 790, 791, 791, 791, 791, - 791, 790, 790, 790, 790, 790, 790, 790, - 790, 791, 791, 791, 791, 791, 791, 790, - 791, 791, 791, 791, 791, 791, 791, 790, - 791, 790, 791, 791, 790, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 790, 791, 791, 791, 791, 791, - 790, 791, 791, 791, 791, 791, 791, 791, - 790, 791, 791, 791, 790, 791, 791, 791, - 790, 791, 790, 825, 826, 827, 828, 829, - 830, 831, 832, 833, 834, 835, 836, 837, - 838, 839, 840, 841, 842, 843, 844, 845, - 846, 847, 848, 849, 850, 851, 852, 853, - 854, 855, 856, 857, 858, 859, 860, 797, - 861, 862, 863, 864, 865, 866, 797, 842, - 797, 790, 791, 790, 791, 791, 790, 790, - 791, 790, 790, 790, 790, 791, 790, 790, - 790, 790, 790, 791, 790, 790, 790, 790, - 790, 791, 791, 791, 791, 791, 790, 790, - 790, 791, 790, 790, 790, 791, 791, 791, - 790, 790, 790, 791, 791, 790, 790, 790, - 791, 791, 791, 790, 790, 790, 791, 791, - 791, 791, 790, 791, 791, 791, 791, 790, - 790, 790, 790, 790, 791, 791, 791, 791, - 790, 790, 791, 791, 791, 790, 790, 791, - 791, 791, 791, 790, 791, 791, 790, 791, - 791, 790, 790, 790, 791, 791, 791, 790, - 790, 790, 790, 791, 791, 791, 791, 791, - 790, 790, 790, 790, 791, 790, 791, 791, - 790, 791, 791, 790, 791, 790, 791, 791, - 791, 790, 791, 791, 790, 790, 790, 791, - 790, 790, 790, 790, 790, 
790, 790, 791, - 791, 791, 791, 790, 791, 791, 791, 791, - 791, 791, 791, 790, 867, 868, 869, 870, - 871, 872, 873, 874, 875, 797, 876, 877, - 878, 879, 880, 790, 791, 790, 790, 790, - 790, 790, 791, 791, 790, 791, 791, 791, - 790, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 790, 791, 791, 791, 790, - 790, 791, 791, 791, 790, 790, 791, 790, - 790, 791, 791, 791, 791, 791, 790, 790, - 790, 790, 791, 791, 791, 791, 791, 791, - 790, 791, 791, 791, 791, 791, 790, 881, - 836, 882, 883, 884, 797, 885, 886, 842, - 797, 790, 791, 791, 791, 791, 790, 790, - 790, 791, 790, 790, 791, 791, 791, 790, - 790, 790, 791, 791, 790, 847, 790, 842, - 797, 797, 887, 790, 797, 790, 791, 842, - 888, 889, 842, 890, 891, 842, 892, 893, - 894, 895, 896, 897, 842, 898, 899, 900, - 842, 901, 902, 903, 861, 904, 905, 906, - 861, 907, 842, 797, 790, 790, 791, 791, - 790, 790, 790, 791, 791, 791, 791, 790, - 791, 791, 790, 790, 790, 790, 791, 791, - 790, 790, 791, 791, 790, 790, 790, 790, - 790, 790, 791, 791, 791, 790, 790, 790, - 791, 790, 790, 790, 791, 791, 790, 791, - 791, 791, 791, 790, 791, 791, 791, 791, - 790, 791, 791, 791, 791, 791, 791, 790, - 790, 790, 791, 791, 791, 791, 790, 908, - 909, 790, 797, 790, 791, 790, 790, 791, - 842, 910, 911, 912, 913, 892, 914, 915, - 916, 917, 918, 919, 920, 921, 922, 923, - 924, 925, 797, 790, 790, 791, 790, 791, - 791, 791, 791, 791, 791, 791, 790, 791, - 791, 791, 790, 791, 790, 790, 791, 790, - 791, 790, 790, 791, 791, 791, 791, 790, - 791, 791, 791, 790, 790, 791, 791, 791, - 791, 790, 791, 791, 790, 790, 791, 791, - 791, 791, 791, 790, 926, 927, 928, 929, - 930, 931, 932, 933, 934, 935, 936, 932, - 938, 939, 940, 941, 937, 790, 942, 943, - 842, 944, 945, 946, 947, 948, 949, 950, - 951, 952, 842, 797, 953, 954, 955, 956, - 842, 957, 958, 959, 960, 961, 962, 963, - 964, 965, 966, 967, 968, 969, 970, 971, - 842, 873, 797, 972, 790, 791, 791, 791, - 791, 791, 790, 790, 790, 791, 790, 791, - 791, 790, 791, 790, 791, 791, 790, 790, - 790, 791, 791, 791, 790, 790, 790, 791, - 791, 791, 790, 790, 790, 790, 791, 790, - 790, 791, 790, 790, 791, 791, 791, 790, - 790, 791, 790, 791, 791, 791, 790, 791, - 791, 791, 791, 791, 791, 790, 790, 790, - 791, 791, 790, 791, 791, 790, 791, 791, - 790, 791, 791, 790, 791, 791, 791, 791, - 791, 791, 791, 790, 791, 790, 791, 790, - 791, 791, 790, 791, 790, 791, 791, 790, - 791, 790, 791, 790, 973, 944, 974, 975, - 976, 977, 978, 979, 980, 981, 982, 825, - 983, 842, 984, 985, 986, 842, 987, 857, - 988, 989, 990, 991, 992, 993, 994, 995, - 842, 790, 790, 790, 791, 791, 791, 790, - 791, 791, 790, 791, 791, 790, 790, 790, - 790, 790, 791, 791, 791, 791, 790, 791, - 791, 791, 791, 791, 791, 790, 790, 790, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 790, 791, 791, 791, 791, 791, 791, - 791, 791, 790, 791, 791, 790, 790, 790, - 790, 791, 791, 791, 790, 790, 790, 791, - 790, 790, 790, 791, 791, 790, 791, 791, - 791, 790, 791, 790, 790, 790, 791, 791, - 790, 791, 791, 791, 790, 791, 791, 791, - 790, 790, 790, 790, 791, 842, 911, 996, - 997, 797, 842, 797, 790, 790, 791, 790, - 791, 842, 996, 797, 790, 842, 998, 797, - 790, 790, 791, 842, 999, 1000, 1001, 902, - 1002, 1003, 842, 1004, 1005, 1006, 797, 790, - 790, 791, 791, 791, 790, 791, 791, 790, - 791, 791, 791, 791, 790, 790, 791, 790, - 790, 791, 791, 790, 791, 790, 842, 797, - 790, 1007, 842, 1008, 790, 797, 790, 791, - 790, 791, 1009, 842, 1010, 1011, 790, 791, - 790, 790, 790, 791, 791, 791, 791, 790, - 1012, 1013, 1014, 842, 1015, 1016, 1017, 1018, - 1019, 1020, 1021, 1022, 1023, 
1024, 1025, 1026, - 1027, 1028, 797, 790, 791, 791, 791, 790, - 790, 790, 790, 791, 791, 790, 790, 791, - 790, 790, 790, 790, 790, 790, 790, 791, - 790, 791, 790, 790, 790, 790, 790, 790, - 791, 791, 791, 791, 791, 790, 790, 791, - 790, 790, 790, 791, 790, 790, 791, 790, - 790, 791, 790, 790, 791, 790, 790, 790, - 791, 791, 791, 790, 790, 790, 791, 791, - 791, 791, 790, 1029, 842, 1030, 842, 1031, - 1032, 1033, 1034, 797, 790, 791, 791, 791, - 791, 791, 790, 790, 790, 791, 790, 790, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 790, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 790, 791, - 791, 791, 791, 791, 790, 1035, 842, 797, - 790, 791, 1036, 842, 827, 797, 790, 791, - 1037, 790, 797, 790, 791, 842, 1038, 797, - 790, 790, 791, 1039, 790, 842, 1040, 797, - 790, 790, 791, 1042, 1041, 791, 791, 791, - 791, 1042, 1041, 791, 1042, 1041, 1042, 1042, - 791, 1042, 1041, 791, 1042, 791, 1042, 1041, - 791, 1042, 791, 1042, 791, 1041, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1041, 791, - 791, 1042, 1042, 791, 1042, 791, 1042, 1041, - 1042, 1042, 1042, 1042, 1042, 791, 1042, 791, - 1042, 791, 1042, 1041, 1042, 1042, 791, 1042, - 791, 1042, 1041, 1042, 1042, 1042, 1042, 1042, - 791, 1042, 791, 1042, 1041, 791, 791, 1042, - 791, 1042, 1041, 1042, 1042, 1042, 791, 1042, - 791, 1042, 791, 1042, 791, 1042, 1041, 1042, - 791, 1042, 791, 1042, 1041, 791, 1042, 1042, - 1042, 1042, 791, 1042, 791, 1042, 791, 1042, - 791, 1042, 791, 1042, 791, 1042, 1041, 791, - 1042, 1041, 1042, 1042, 1042, 791, 1042, 791, - 1042, 1041, 1042, 791, 1042, 791, 1042, 1041, - 791, 1042, 1042, 1042, 1042, 791, 1042, 791, - 1042, 1041, 791, 1042, 791, 1042, 791, 1042, - 1041, 1042, 1042, 791, 1042, 791, 1042, 1041, - 791, 1042, 791, 1042, 791, 1042, 791, 1041, - 1042, 1042, 1042, 791, 1042, 791, 1042, 1041, - 791, 1042, 1041, 1042, 1042, 791, 1042, 1041, - 1042, 1042, 1042, 791, 1042, 1042, 1042, 1042, - 1042, 1042, 791, 791, 1042, 791, 1042, 791, - 1042, 791, 1042, 1041, 1042, 791, 1042, 791, - 1042, 1041, 791, 1042, 1041, 1042, 791, 1042, - 1041, 1042, 791, 1042, 1041, 791, 791, 1042, - 1041, 791, 1042, 791, 1042, 791, 1042, 791, - 1042, 791, 1042, 791, 1041, 1042, 1042, 791, - 1042, 1042, 1042, 1042, 791, 791, 1042, 1042, - 1042, 1042, 1042, 791, 1042, 1042, 1042, 1042, - 1042, 1041, 791, 1042, 1042, 791, 1042, 791, - 1041, 1042, 1042, 791, 1042, 1041, 791, 791, - 1042, 791, 1041, 1042, 1042, 1041, 791, 1042, - 791, 1041, 1042, 1041, 791, 1042, 791, 1042, - 791, 1041, 1042, 1042, 1041, 791, 1042, 791, - 1042, 791, 1042, 1041, 1042, 791, 1042, 791, - 1042, 1041, 791, 1042, 1041, 791, 791, 1042, - 1041, 1042, 791, 1041, 1042, 1041, 791, 1042, - 791, 1042, 791, 1041, 1042, 1041, 791, 791, - 1042, 1041, 1042, 791, 1042, 791, 1042, 1041, - 791, 1042, 791, 1041, 1042, 1041, 791, 791, - 1042, 791, 1041, 1042, 1041, 791, 791, 1042, - 1041, 1042, 791, 1042, 1041, 1042, 791, 1042, - 1041, 1042, 791, 1042, 791, 1042, 791, 1041, - 1042, 1041, 791, 791, 1042, 1041, 1042, 791, - 1042, 791, 1042, 1041, 791, 1042, 1041, 1042, - 1042, 791, 1042, 791, 1042, 1041, 1041, 791, - 1041, 791, 1042, 1042, 791, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1041, 791, 1042, 1042, - 1042, 791, 1041, 1042, 1042, 1042, 791, 1042, - 791, 1042, 791, 1042, 791, 1042, 791, 1042, - 1041, 791, 791, 1042, 1041, 1042, 791, 1042, - 1041, 791, 791, 1042, 791, 791, 791, 1042, - 791, 1042, 791, 1042, 791, 1042, 791, 1041, - 791, 1042, 791, 1042, 791, 1041, 1042, 1041, - 791, 1042, 791, 1041, 1042, 
791, 1042, 1042, - 1042, 1041, 791, 1042, 791, 791, 1042, 791, - 1041, 1042, 1042, 1041, 791, 1042, 1042, 1042, - 1042, 791, 1042, 791, 1041, 1042, 1042, 1042, - 791, 1042, 1041, 1042, 791, 1042, 791, 1042, - 791, 1042, 791, 1042, 1041, 1042, 1042, 791, - 1042, 1041, 791, 1042, 791, 1042, 791, 1041, - 1042, 1042, 1041, 791, 1042, 791, 1041, 1042, - 1041, 791, 1042, 1041, 791, 1042, 791, 1042, - 1041, 1042, 1042, 1042, 1041, 791, 791, 791, - 1042, 1041, 791, 1042, 791, 1041, 1042, 1041, - 791, 1042, 791, 1042, 791, 1041, 1042, 1042, - 1042, 1041, 791, 1042, 791, 1041, 1042, 1042, - 1042, 1042, 1041, 791, 1042, 791, 1042, 1041, - 791, 791, 1042, 791, 1042, 1041, 1042, 791, - 1042, 791, 1041, 1042, 1042, 1041, 791, 1042, - 791, 1042, 1041, 791, 1042, 1042, 1042, 791, - 1042, 791, 1041, 791, 1042, 1041, 1042, 791, - 791, 1042, 791, 1042, 791, 1041, 1042, 1042, - 1042, 1042, 1041, 791, 1042, 791, 1042, 791, - 1042, 791, 1042, 791, 1042, 1041, 1042, 1042, - 1042, 791, 1042, 791, 1042, 791, 1042, 791, - 1041, 1042, 1042, 791, 791, 1042, 1041, 1042, - 791, 1042, 1042, 1041, 791, 1042, 791, 1042, - 1041, 791, 791, 1042, 1042, 1042, 1042, 791, - 1042, 791, 1042, 791, 1041, 1042, 1042, 791, - 1041, 1042, 1041, 791, 1042, 791, 1041, 1042, - 1041, 791, 1042, 791, 1041, 1042, 791, 1042, - 1042, 1041, 791, 1042, 1042, 791, 1041, 1042, - 1041, 791, 1042, 791, 1042, 1041, 1042, 791, - 1042, 791, 1041, 1042, 1041, 791, 1042, 791, - 1042, 791, 1042, 791, 1042, 791, 1042, 1041, - 1043, 1041, 1044, 1045, 1046, 1047, 1048, 1049, - 1050, 1051, 1052, 1053, 1054, 1046, 1055, 1056, - 1057, 1058, 1059, 1046, 1060, 1061, 1062, 1063, - 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, - 1072, 1073, 1074, 1046, 1075, 1043, 1055, 1043, - 1076, 1043, 1041, 1042, 1042, 1042, 1042, 791, - 1041, 1042, 1042, 1041, 791, 1042, 1041, 791, - 791, 1042, 1041, 791, 1042, 791, 1041, 1042, - 1041, 791, 791, 1042, 791, 1041, 1042, 1042, - 1041, 791, 1042, 1042, 1042, 1041, 791, 1042, - 791, 1042, 1042, 1041, 791, 791, 1042, 791, - 1041, 1042, 1041, 791, 1042, 1041, 791, 791, - 1042, 791, 1042, 1041, 791, 1042, 791, 791, - 1042, 791, 1042, 791, 1041, 1042, 1042, 1041, - 791, 1042, 1042, 791, 1042, 1041, 791, 1042, - 791, 1042, 1041, 791, 1042, 791, 1041, 791, - 1042, 1042, 1042, 791, 1042, 1041, 1042, 791, - 1042, 1041, 791, 1042, 1041, 1042, 791, 1042, - 1041, 791, 1042, 1041, 791, 1042, 791, 1042, - 1041, 791, 1042, 1041, 791, 1042, 1041, 1077, - 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, - 1086, 1087, 1088, 1048, 1089, 1090, 1091, 1092, - 1093, 1090, 1094, 1095, 1096, 1097, 1098, 1099, - 1100, 1101, 1102, 1043, 1041, 1042, 791, 1042, - 1041, 1042, 791, 1042, 1041, 1042, 791, 1042, - 1041, 1042, 791, 1042, 1041, 791, 1042, 791, - 1042, 1041, 1042, 791, 1042, 1041, 1042, 791, - 791, 791, 1042, 1041, 1042, 791, 1042, 1041, - 1042, 1042, 1042, 1042, 791, 1042, 791, 1041, - 1042, 1041, 791, 791, 1042, 791, 1042, 1041, - 1042, 791, 1042, 1041, 791, 1042, 1041, 1042, - 1042, 791, 1042, 1041, 791, 1042, 1041, 1042, - 791, 1042, 1041, 791, 1042, 1041, 791, 1042, - 1041, 791, 1042, 1041, 1042, 1041, 791, 791, - 1042, 1041, 1042, 791, 1042, 1041, 791, 1042, - 791, 1041, 1042, 1041, 791, 1046, 1103, 1043, - 1046, 1104, 1046, 1105, 1055, 1043, 1041, 1042, - 1041, 791, 1042, 1041, 791, 1046, 1104, 1055, - 1043, 1041, 1046, 1106, 1043, 1055, 1043, 1041, - 1042, 1041, 791, 1046, 1107, 1064, 1108, 1090, - 1109, 1102, 1046, 1110, 1111, 1112, 1043, 1055, - 1043, 1041, 1042, 1041, 791, 1042, 791, 1042, - 1041, 791, 1042, 791, 1042, 791, 1041, 1042, 
- 1042, 1041, 791, 1042, 791, 1042, 1041, 791, - 1042, 1041, 1046, 1055, 797, 1041, 1113, 1046, - 1114, 1055, 1043, 1041, 797, 1042, 1041, 791, - 1042, 1041, 791, 1115, 1046, 1116, 1117, 1043, - 1041, 791, 1042, 1041, 1042, 1042, 1041, 791, - 791, 1042, 791, 1042, 1041, 1046, 1118, 1119, - 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, - 1128, 1043, 1055, 1043, 1041, 1042, 791, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 791, 1042, - 791, 1042, 1042, 1042, 1042, 1042, 1042, 1041, - 791, 1042, 1042, 791, 1042, 791, 1041, 1042, - 791, 1042, 1042, 1042, 791, 1042, 1042, 791, - 1042, 1042, 791, 1042, 1042, 791, 1042, 1042, - 1041, 791, 1046, 1129, 1046, 1105, 1130, 1131, - 1132, 1043, 1055, 1043, 1041, 1042, 1041, 791, - 1042, 1042, 1042, 791, 1042, 1042, 1042, 791, - 1042, 791, 1042, 1041, 791, 791, 791, 791, - 1042, 1042, 791, 791, 791, 791, 791, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 791, 1042, - 791, 1042, 791, 1041, 1042, 1042, 1042, 791, - 1042, 791, 1042, 1041, 1055, 797, 1133, 1046, - 1055, 797, 1042, 1041, 791, 1134, 1046, 1135, - 1055, 797, 1042, 1041, 791, 1042, 791, 1136, - 1055, 1043, 1041, 797, 1042, 1041, 791, 1046, - 1137, 1043, 1055, 1043, 1041, 1042, 1041, 791, - 1138, 1139, 1140, 1138, 1141, 1142, 1143, 1144, - 1146, 1147, 1148, 1149, 1150, 672, 672, 672, - 1151, 1152, 1153, 1154, 672, 1157, 1158, 1160, - 1161, 1162, 1156, 1163, 1164, 1165, 1166, 1167, - 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, - 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1184, - 1185, 1186, 1187, 1188, 1189, 672, 1145, 10, - 1145, 422, 1145, 422, 1156, 1159, 1183, 1190, - 1155, 1138, 1138, 1191, 1139, 1192, 1194, 1193, - 2, 1, 1195, 1193, 1196, 1193, 5, 1, - 1193, 6, 5, 9, 11, 11, 10, 1198, - 1199, 1200, 1193, 1201, 1202, 1193, 1203, 1193, - 422, 422, 1205, 1206, 491, 472, 1207, 472, - 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, - 1216, 1217, 1218, 546, 1219, 522, 1220, 1221, - 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, - 1230, 1231, 422, 422, 422, 427, 567, 1204, - 1232, 1193, 1233, 1193, 672, 1234, 422, 422, - 422, 672, 1234, 672, 672, 422, 1234, 422, - 1234, 422, 1234, 422, 672, 672, 672, 672, - 672, 1234, 422, 672, 672, 672, 422, 672, - 422, 1234, 422, 672, 672, 672, 672, 422, - 1234, 672, 422, 672, 422, 672, 422, 672, - 672, 422, 672, 1234, 422, 672, 422, 672, - 422, 672, 1234, 672, 422, 1234, 672, 422, - 672, 422, 1234, 672, 672, 672, 672, 672, - 1234, 422, 422, 672, 422, 672, 1234, 672, - 422, 1234, 672, 672, 1234, 422, 422, 672, - 422, 672, 422, 672, 1234, 1235, 1236, 1237, - 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, - 717, 1246, 1247, 1248, 1249, 1250, 1251, 1252, - 1253, 1254, 1255, 1256, 1257, 1256, 1258, 1259, - 1260, 1261, 1262, 673, 1234, 1263, 1264, 1265, - 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, - 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, - 727, 1282, 1283, 1284, 694, 1285, 1286, 1287, - 1288, 1289, 1290, 673, 1291, 1292, 1293, 1294, - 1295, 1296, 1297, 1298, 676, 1299, 673, 676, - 1300, 1301, 1302, 1303, 685, 1234, 1304, 1305, - 1306, 1307, 705, 1308, 1309, 685, 1310, 1311, - 1312, 1313, 1314, 673, 1234, 1315, 1274, 1316, - 1317, 1318, 685, 1319, 1320, 676, 673, 685, - 427, 1234, 1284, 673, 676, 685, 427, 685, - 427, 1321, 685, 1234, 427, 676, 1322, 1323, - 676, 1324, 1325, 683, 1326, 1327, 1328, 1329, - 1330, 1280, 1331, 1332, 1333, 1334, 1335, 1336, - 1337, 1338, 1339, 1340, 1341, 1342, 1299, 1343, - 676, 685, 427, 1234, 1344, 1345, 685, 673, - 1234, 427, 673, 1234, 676, 1346, 733, 1347, - 1348, 1349, 1350, 1351, 1352, 1353, 1354, 673, - 1355, 
1356, 1357, 1358, 1359, 1360, 673, 685, - 1234, 1362, 1363, 1364, 1365, 1366, 1367, 1368, - 1369, 1370, 1371, 1372, 1368, 1374, 1375, 1376, - 1377, 1361, 1373, 1361, 1234, 1361, 1234, 1378, - 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, - 1386, 1383, 769, 1387, 1387, 1387, 1382, 1388, - 1387, 770, 771, 1389, 1387, 769, 1387, 1387, - 1382, 1390, 1387, 770, 771, 1389, 1387, 769, - 1382, 1390, 1391, 1392, 1393, 769, 1387, 1387, - 1387, 1382, 1388, 770, 771, 1389, 1387, 769, - 1387, 1387, 1387, 1382, 1388, 770, 771, 1389, - 1387, 769, 1387, 1387, 1387, 1382, 1388, 771, - 770, 771, 1389, 1387, 769, 1395, 769, 1397, - 1396, 1398, 769, 1400, 1399, 769, 1401, 773, - 1401, 1402, 1401, 775, 1403, 1404, 1405, 1406, - 1407, 1408, 1409, 1406, 777, 775, 1403, 1411, - 1410, 778, 779, 1412, 1410, 777, 1414, 1413, - 1416, 1415, 777, 1417, 778, 1417, 779, 1417, - 783, 1418, 1419, 1420, 1421, 1422, 1423, 1424, - 1421, 785, 783, 1418, 1426, 1425, 786, 787, - 1427, 1425, 785, 1429, 1428, 1431, 1430, 785, - 1432, 786, 1432, 787, 1432, 1435, 1436, 1438, - 1439, 1440, 1434, 1441, 1442, 1443, 1444, 1445, - 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, - 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1462, - 1463, 1464, 1465, 1466, 1467, 791, 791, 1433, - 1434, 1437, 1461, 1468, 1433, 1042, 791, 791, - 1470, 1471, 861, 842, 1472, 842, 1473, 1474, - 1475, 1476, 1477, 1478, 1479, 1480, 1481, 1482, - 1483, 916, 1484, 892, 1485, 1486, 1487, 1488, - 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, - 791, 791, 791, 797, 937, 1469, 1042, 1497, - 791, 791, 791, 1042, 1497, 1042, 1042, 791, - 1497, 791, 1497, 791, 1497, 791, 1042, 1042, - 1042, 1042, 1042, 1497, 791, 1042, 1042, 1042, - 791, 1042, 791, 1497, 791, 1042, 1042, 1042, - 1042, 791, 1497, 1042, 791, 1042, 791, 1042, - 791, 1042, 1042, 791, 1042, 1497, 791, 1042, - 791, 1042, 791, 1042, 1497, 1042, 791, 1497, - 1042, 791, 1042, 791, 1497, 1042, 1042, 1042, - 1042, 1042, 1497, 791, 791, 1042, 791, 1042, - 1497, 1042, 791, 1497, 1042, 1042, 1497, 791, - 791, 1042, 791, 1042, 791, 1042, 1497, 1498, - 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, - 1507, 1508, 1087, 1509, 1510, 1511, 1512, 1513, - 1514, 1515, 1516, 1517, 1518, 1519, 1520, 1519, - 1521, 1522, 1523, 1524, 1525, 1043, 1497, 1526, - 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534, - 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, - 1543, 1544, 1097, 1545, 1546, 1547, 1064, 1548, - 1549, 1550, 1551, 1552, 1553, 1043, 1554, 1555, - 1556, 1557, 1558, 1559, 1560, 1561, 1046, 1562, - 1043, 1046, 1563, 1564, 1565, 1566, 1055, 1497, - 1567, 1568, 1569, 1570, 1075, 1571, 1572, 1055, - 1573, 1574, 1575, 1576, 1577, 1043, 1497, 1578, - 1537, 1579, 1580, 1581, 1055, 1582, 1583, 1046, - 1043, 1055, 797, 1497, 1547, 1043, 1046, 1055, - 797, 1055, 797, 1584, 1055, 1497, 797, 1046, - 1585, 1586, 1046, 1587, 1588, 1053, 1589, 1590, - 1591, 1592, 1593, 1543, 1594, 1595, 1596, 1597, - 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, - 1562, 1606, 1046, 1055, 797, 1497, 1607, 1608, - 1055, 1043, 1497, 797, 1043, 1497, 1046, 1609, - 1103, 1610, 1611, 1612, 1613, 1614, 1615, 1616, - 1617, 1043, 1618, 1619, 1620, 1621, 1622, 1623, - 1043, 1055, 1497, 1625, 1626, 1627, 1628, 1629, - 1630, 1631, 1632, 1633, 1634, 1635, 1631, 1637, - 1638, 1639, 1640, 1624, 1636, 1624, 1497, 1624, - 1497, + 421, 420, 421, 421, 421, 421, 421, 421, + 421, 421, 420, 421, 421, 420, 420, 420, + 420, 421, 421, 421, 420, 420, 420, 421, + 420, 420, 420, 421, 421, 420, 421, 421, + 421, 420, 421, 420, 420, 420, 421, 421, + 420, 421, 421, 421, 420, 421, 421, 
421, + 420, 420, 420, 420, 421, 471, 540, 625, + 626, 426, 471, 426, 420, 420, 421, 420, + 421, 471, 625, 426, 420, 471, 627, 426, + 420, 420, 421, 471, 628, 629, 630, 531, + 631, 632, 471, 633, 634, 635, 426, 420, + 420, 421, 421, 421, 420, 421, 421, 420, + 421, 421, 421, 421, 420, 420, 421, 420, + 420, 421, 421, 420, 421, 420, 471, 426, + 420, 636, 471, 637, 420, 426, 420, 421, + 420, 421, 638, 471, 639, 640, 420, 421, + 420, 420, 420, 421, 421, 421, 421, 420, + 641, 642, 643, 471, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 426, 420, 421, 421, 421, 420, + 420, 420, 420, 421, 421, 420, 420, 421, + 420, 420, 420, 420, 420, 420, 420, 421, + 420, 421, 420, 420, 420, 420, 420, 420, + 421, 421, 421, 421, 421, 420, 420, 421, + 420, 420, 420, 421, 420, 420, 421, 420, + 420, 421, 420, 420, 421, 420, 420, 420, + 421, 421, 421, 420, 420, 420, 421, 421, + 421, 421, 420, 658, 471, 659, 471, 660, + 661, 662, 663, 426, 420, 421, 421, 421, + 421, 421, 420, 420, 420, 421, 420, 420, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 420, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 420, 421, + 421, 421, 421, 421, 420, 664, 471, 426, + 420, 421, 665, 471, 456, 426, 420, 421, + 666, 420, 426, 420, 421, 471, 667, 426, + 420, 420, 421, 668, 420, 471, 669, 426, + 420, 420, 421, 671, 670, 421, 421, 421, + 421, 671, 670, 421, 671, 670, 671, 671, + 421, 671, 670, 421, 671, 421, 671, 670, + 421, 671, 421, 671, 421, 670, 671, 671, + 671, 671, 671, 671, 671, 671, 670, 421, + 421, 671, 671, 421, 671, 421, 671, 670, + 671, 671, 671, 671, 671, 421, 671, 421, + 671, 421, 671, 670, 671, 671, 421, 671, + 421, 671, 670, 671, 671, 671, 671, 671, + 421, 671, 421, 671, 670, 421, 421, 671, + 421, 671, 670, 671, 671, 671, 421, 671, + 421, 671, 421, 671, 421, 671, 670, 671, + 421, 671, 421, 671, 670, 421, 671, 671, + 671, 671, 421, 671, 421, 671, 421, 671, + 421, 671, 421, 671, 421, 671, 670, 421, + 671, 670, 671, 671, 671, 421, 671, 421, + 671, 670, 671, 421, 671, 421, 671, 670, + 421, 671, 671, 671, 671, 421, 671, 421, + 671, 670, 421, 671, 421, 671, 421, 671, + 670, 671, 671, 421, 671, 421, 671, 670, + 421, 671, 421, 671, 421, 671, 421, 670, + 671, 671, 671, 421, 671, 421, 671, 670, + 421, 671, 670, 671, 671, 421, 671, 670, + 671, 671, 671, 421, 671, 671, 671, 671, + 671, 671, 421, 421, 671, 421, 671, 421, + 671, 421, 671, 670, 671, 421, 671, 421, + 671, 670, 421, 671, 670, 671, 421, 671, + 670, 671, 421, 671, 670, 421, 421, 671, + 670, 421, 671, 421, 671, 421, 671, 421, + 671, 421, 671, 421, 670, 671, 671, 421, + 671, 671, 671, 671, 421, 421, 671, 671, + 671, 671, 671, 421, 671, 671, 671, 671, + 671, 670, 421, 671, 671, 421, 671, 421, + 670, 671, 671, 421, 671, 670, 421, 421, + 671, 421, 670, 671, 671, 670, 421, 671, + 421, 670, 671, 670, 421, 671, 421, 671, + 421, 670, 671, 671, 670, 421, 671, 421, + 671, 421, 671, 670, 671, 421, 671, 421, + 671, 670, 421, 671, 670, 421, 421, 671, + 670, 671, 421, 670, 671, 670, 421, 671, + 421, 671, 421, 670, 671, 670, 421, 421, + 671, 670, 671, 421, 671, 421, 671, 670, + 421, 671, 421, 670, 671, 670, 421, 421, + 671, 421, 670, 671, 670, 421, 421, 671, + 670, 671, 421, 671, 670, 671, 421, 671, + 670, 671, 421, 671, 421, 671, 421, 670, + 671, 670, 421, 421, 671, 670, 671, 421, + 671, 421, 671, 670, 421, 671, 670, 671, + 671, 421, 671, 421, 671, 670, 421, 670, + 670, 421, 671, 671, 421, 671, 671, 671, + 671, 671, 671, 671, 670, 421, 671, 671, + 671, 421, 670, 671, 671, 671, 421, 671, + 421, 671, 421, 671, 
421, 671, 421, 671, + 670, 421, 421, 671, 670, 671, 421, 671, + 670, 421, 421, 671, 421, 421, 421, 671, + 421, 671, 421, 671, 421, 671, 421, 670, + 421, 671, 421, 671, 421, 670, 671, 670, + 421, 671, 421, 670, 671, 421, 671, 671, + 671, 670, 421, 671, 421, 421, 671, 421, + 670, 671, 671, 670, 421, 671, 671, 671, + 671, 421, 671, 421, 670, 671, 671, 671, + 421, 671, 670, 671, 421, 671, 421, 671, + 421, 671, 421, 671, 670, 671, 671, 421, + 671, 670, 421, 671, 421, 671, 421, 670, + 671, 671, 670, 421, 671, 421, 670, 671, + 670, 421, 671, 670, 421, 671, 421, 671, + 670, 671, 671, 671, 670, 421, 421, 421, + 671, 670, 421, 671, 421, 670, 671, 670, + 421, 671, 421, 671, 421, 670, 671, 671, + 671, 670, 421, 671, 421, 670, 671, 671, + 671, 671, 670, 421, 671, 421, 671, 670, + 421, 421, 671, 421, 671, 670, 671, 421, + 671, 421, 670, 671, 671, 670, 421, 671, + 421, 671, 670, 421, 671, 671, 671, 421, + 671, 421, 670, 421, 671, 670, 671, 421, + 421, 671, 421, 671, 421, 670, 671, 671, + 671, 671, 670, 421, 671, 421, 671, 421, + 671, 421, 671, 421, 671, 670, 671, 671, + 671, 421, 671, 421, 671, 421, 671, 421, + 670, 671, 671, 421, 421, 671, 670, 671, + 421, 671, 671, 670, 421, 671, 421, 671, + 670, 421, 421, 671, 671, 671, 671, 421, + 671, 421, 671, 421, 670, 671, 671, 421, + 670, 671, 670, 421, 671, 421, 670, 671, + 670, 421, 671, 421, 670, 671, 421, 671, + 671, 670, 421, 671, 671, 421, 670, 671, + 670, 421, 671, 421, 671, 670, 671, 421, + 671, 421, 670, 671, 670, 421, 671, 421, + 671, 421, 671, 421, 671, 421, 671, 670, + 672, 670, 673, 674, 675, 676, 677, 678, + 679, 680, 681, 682, 683, 675, 684, 685, + 686, 687, 688, 675, 689, 690, 691, 692, + 693, 694, 695, 696, 697, 698, 699, 700, + 701, 702, 703, 675, 704, 672, 684, 672, + 705, 672, 670, 671, 671, 671, 671, 421, + 670, 671, 671, 670, 421, 671, 670, 421, + 421, 671, 670, 421, 671, 421, 670, 671, + 670, 421, 421, 671, 421, 670, 671, 671, + 670, 421, 671, 671, 671, 670, 421, 671, + 421, 671, 671, 670, 421, 421, 671, 421, + 670, 671, 670, 421, 671, 670, 421, 421, + 671, 421, 671, 670, 421, 671, 421, 421, + 671, 421, 671, 421, 670, 671, 671, 670, + 421, 671, 671, 421, 671, 670, 421, 671, + 421, 671, 670, 421, 671, 421, 670, 421, + 671, 671, 671, 421, 671, 670, 671, 421, + 671, 670, 421, 671, 670, 671, 421, 671, + 670, 421, 671, 670, 421, 671, 421, 671, + 670, 421, 671, 670, 421, 671, 670, 706, + 707, 708, 709, 710, 711, 712, 713, 714, + 715, 716, 717, 677, 718, 719, 720, 721, + 722, 719, 723, 724, 725, 726, 727, 728, + 729, 730, 731, 672, 670, 671, 421, 671, + 670, 671, 421, 671, 670, 671, 421, 671, + 670, 671, 421, 671, 670, 421, 671, 421, + 671, 670, 671, 421, 671, 670, 671, 421, + 421, 421, 671, 670, 671, 421, 671, 670, + 671, 671, 671, 671, 421, 671, 421, 670, + 671, 670, 421, 421, 671, 421, 671, 670, + 671, 421, 671, 670, 421, 671, 670, 671, + 671, 421, 671, 670, 421, 671, 670, 671, + 421, 671, 670, 421, 671, 670, 421, 671, + 670, 421, 671, 670, 671, 670, 421, 421, + 671, 670, 671, 421, 671, 670, 421, 671, + 421, 670, 671, 670, 421, 675, 732, 672, + 675, 733, 675, 734, 684, 672, 670, 671, + 670, 421, 671, 670, 421, 675, 733, 684, + 672, 670, 675, 735, 672, 684, 672, 670, + 671, 670, 421, 675, 736, 693, 737, 719, + 738, 731, 675, 739, 740, 741, 672, 684, + 672, 670, 671, 670, 421, 671, 421, 671, + 670, 421, 671, 421, 671, 421, 670, 671, + 671, 670, 421, 671, 421, 671, 670, 421, + 671, 670, 675, 684, 426, 670, 742, 675, + 743, 684, 672, 670, 426, 671, 670, 421, + 671, 670, 421, 744, 675, 745, 746, 672, + 670, 421, 671, 670, 671, 671, 670, 421, + 421, 
671, 421, 671, 670, 675, 747, 748, + 749, 750, 751, 752, 753, 754, 755, 756, + 757, 672, 684, 672, 670, 671, 421, 671, + 671, 671, 671, 671, 671, 671, 421, 671, + 421, 671, 671, 671, 671, 671, 671, 670, + 421, 671, 671, 421, 671, 421, 670, 671, + 421, 671, 671, 671, 421, 671, 671, 421, + 671, 671, 421, 671, 671, 421, 671, 671, + 670, 421, 675, 758, 675, 734, 759, 760, + 761, 672, 684, 672, 670, 671, 670, 421, + 671, 671, 671, 421, 671, 671, 671, 421, + 671, 421, 671, 670, 421, 421, 421, 421, + 671, 671, 421, 421, 421, 421, 421, 671, + 671, 671, 671, 671, 671, 671, 421, 671, + 421, 671, 421, 670, 671, 671, 671, 421, + 671, 421, 671, 670, 684, 426, 762, 675, + 684, 426, 671, 670, 421, 763, 675, 764, + 684, 426, 671, 670, 421, 671, 421, 765, + 684, 672, 670, 426, 671, 670, 421, 675, + 766, 672, 684, 672, 670, 671, 670, 421, + 767, 768, 767, 769, 770, 767, 771, 767, + 772, 767, 770, 773, 774, 773, 776, 775, + 777, 778, 777, 779, 780, 775, 781, 775, + 782, 777, 783, 778, 784, 779, 786, 785, + 787, 788, 788, 785, 789, 785, 790, 787, + 791, 788, 792, 788, 794, 794, 794, 794, + 793, 794, 794, 794, 793, 794, 793, 794, + 794, 793, 793, 793, 793, 793, 793, 794, + 793, 793, 793, 793, 794, 794, 794, 794, + 794, 793, 793, 794, 793, 793, 794, 793, + 794, 793, 793, 794, 793, 793, 793, 794, + 794, 794, 794, 794, 794, 793, 794, 794, + 793, 794, 794, 793, 793, 793, 793, 793, + 793, 794, 794, 793, 793, 794, 793, 794, + 794, 794, 793, 796, 797, 798, 799, 800, + 801, 802, 803, 804, 805, 806, 807, 808, + 809, 810, 811, 812, 813, 814, 815, 816, + 817, 818, 819, 820, 821, 822, 823, 824, + 825, 826, 827, 793, 794, 793, 794, 793, + 794, 794, 793, 794, 794, 793, 793, 793, + 794, 793, 793, 793, 793, 793, 793, 793, + 794, 793, 793, 793, 793, 793, 793, 793, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 793, 793, 793, 793, 793, + 793, 793, 793, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 793, 793, 793, 793, + 793, 793, 793, 793, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 793, 794, 794, + 794, 794, 794, 794, 794, 794, 793, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 793, 794, 794, 794, 794, 794, + 794, 793, 794, 794, 794, 794, 794, 794, + 793, 793, 793, 793, 793, 793, 793, 793, + 794, 794, 794, 794, 794, 794, 794, 794, + 793, 794, 794, 794, 794, 794, 794, 794, + 794, 793, 794, 794, 794, 794, 794, 793, + 793, 793, 793, 793, 793, 793, 793, 794, + 794, 794, 794, 794, 794, 793, 794, 794, + 794, 794, 794, 794, 794, 793, 794, 793, + 794, 794, 793, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 793, 794, 794, 794, 794, 794, 793, 794, + 794, 794, 794, 794, 794, 794, 793, 794, + 794, 794, 793, 794, 794, 794, 793, 794, + 793, 828, 829, 830, 831, 832, 833, 834, + 835, 836, 837, 838, 839, 840, 841, 842, + 843, 844, 845, 846, 847, 848, 849, 850, + 851, 852, 853, 854, 855, 856, 857, 858, + 859, 860, 861, 862, 863, 800, 864, 865, + 866, 867, 868, 869, 800, 845, 800, 793, + 794, 793, 794, 794, 793, 793, 794, 793, + 793, 793, 793, 794, 793, 793, 793, 793, + 793, 794, 793, 793, 793, 793, 793, 794, + 794, 794, 794, 794, 793, 793, 793, 794, + 793, 793, 793, 794, 794, 794, 793, 793, + 793, 794, 794, 793, 793, 793, 794, 794, + 794, 793, 793, 793, 794, 794, 794, 794, + 793, 794, 794, 794, 794, 793, 793, 793, + 793, 793, 794, 794, 794, 794, 793, 793, + 794, 794, 794, 793, 793, 794, 794, 794, + 794, 793, 794, 794, 793, 794, 794, 793, + 793, 793, 794, 794, 794, 793, 793, 793, + 793, 794, 794, 794, 794, 794, 793, 793, + 793, 793, 794, 793, 794, 794, 793, 794, + 794, 793, 794, 793, 794, 794, 
794, 793, + 794, 794, 793, 793, 793, 794, 793, 793, + 793, 793, 793, 793, 793, 794, 794, 794, + 794, 793, 794, 794, 794, 794, 794, 794, + 794, 793, 870, 871, 872, 873, 874, 875, + 876, 877, 878, 800, 879, 880, 881, 882, + 883, 793, 794, 793, 793, 793, 793, 793, + 794, 794, 793, 794, 794, 794, 793, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 793, 794, 794, 794, 793, 793, 794, + 794, 794, 793, 793, 794, 793, 793, 794, + 794, 794, 794, 794, 793, 793, 793, 793, + 794, 794, 794, 794, 794, 794, 793, 794, + 794, 794, 794, 794, 793, 884, 839, 885, + 886, 887, 800, 888, 889, 845, 800, 793, + 794, 794, 794, 794, 793, 793, 793, 794, + 793, 793, 794, 794, 794, 793, 793, 793, + 794, 794, 793, 850, 793, 845, 800, 800, + 890, 793, 800, 793, 794, 845, 891, 892, + 845, 893, 894, 845, 895, 896, 897, 898, + 899, 900, 845, 901, 902, 903, 845, 904, + 905, 906, 864, 907, 908, 909, 864, 910, + 845, 800, 793, 793, 794, 794, 793, 793, + 793, 794, 794, 794, 794, 793, 794, 794, + 793, 793, 793, 793, 794, 794, 793, 793, + 794, 794, 793, 793, 793, 793, 793, 793, + 794, 794, 794, 793, 793, 793, 794, 793, + 793, 793, 794, 794, 793, 794, 794, 794, + 794, 793, 794, 794, 794, 794, 793, 794, + 794, 794, 794, 794, 794, 793, 793, 793, + 794, 794, 794, 794, 793, 911, 912, 793, + 800, 793, 794, 793, 793, 794, 845, 913, + 914, 915, 916, 895, 917, 918, 919, 920, + 921, 922, 923, 924, 925, 926, 927, 928, + 800, 793, 793, 794, 793, 794, 794, 794, + 794, 794, 794, 794, 793, 794, 794, 794, + 793, 794, 793, 793, 794, 793, 794, 793, + 793, 794, 794, 794, 794, 793, 794, 794, + 794, 793, 793, 794, 794, 794, 794, 793, + 794, 794, 793, 793, 794, 794, 794, 794, + 794, 793, 929, 930, 931, 932, 933, 934, + 935, 936, 937, 938, 939, 935, 941, 942, + 943, 944, 940, 793, 945, 946, 845, 947, + 948, 949, 950, 951, 952, 953, 954, 955, + 845, 800, 956, 957, 958, 959, 845, 960, + 961, 962, 963, 964, 965, 966, 967, 968, + 969, 970, 971, 972, 973, 974, 845, 876, + 800, 975, 793, 794, 794, 794, 794, 794, + 793, 793, 793, 794, 793, 794, 794, 793, + 794, 793, 794, 794, 793, 793, 793, 794, + 794, 794, 793, 793, 793, 794, 794, 794, + 793, 793, 793, 793, 794, 793, 793, 794, + 793, 793, 794, 794, 794, 793, 793, 794, + 793, 794, 794, 794, 793, 794, 794, 794, + 794, 794, 794, 793, 793, 793, 794, 794, + 793, 794, 794, 793, 794, 794, 793, 794, + 794, 793, 794, 794, 794, 794, 794, 794, + 794, 793, 794, 793, 794, 793, 794, 794, + 793, 794, 793, 794, 794, 793, 794, 793, + 794, 793, 976, 947, 977, 978, 979, 980, + 981, 982, 983, 984, 985, 828, 986, 845, + 987, 988, 989, 845, 990, 860, 991, 992, + 993, 994, 995, 996, 997, 998, 845, 793, + 793, 793, 794, 794, 794, 793, 794, 794, + 793, 794, 794, 793, 793, 793, 793, 793, + 794, 794, 794, 794, 793, 794, 794, 794, + 794, 794, 794, 793, 793, 793, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 793, + 794, 794, 794, 794, 794, 794, 794, 794, + 793, 794, 794, 793, 793, 793, 793, 794, + 794, 794, 793, 793, 793, 794, 793, 793, + 793, 794, 794, 793, 794, 794, 794, 793, + 794, 793, 793, 793, 794, 794, 793, 794, + 794, 794, 793, 794, 794, 794, 793, 793, + 793, 793, 794, 845, 914, 999, 1000, 800, + 845, 800, 793, 793, 794, 793, 794, 845, + 999, 800, 793, 845, 1001, 800, 793, 793, + 794, 845, 1002, 1003, 1004, 905, 1005, 1006, + 845, 1007, 1008, 1009, 800, 793, 793, 794, + 794, 794, 793, 794, 794, 793, 794, 794, + 794, 794, 793, 793, 794, 793, 793, 794, + 794, 793, 794, 793, 845, 800, 793, 1010, + 845, 1011, 793, 800, 793, 794, 793, 794, + 1012, 845, 1013, 1014, 793, 794, 793, 793, + 793, 794, 794, 794, 794, 793, 1015, 1016, 
+ 1017, 845, 1018, 1019, 1020, 1021, 1022, 1023, + 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, + 800, 793, 794, 794, 794, 793, 793, 793, + 793, 794, 794, 793, 793, 794, 793, 793, + 793, 793, 793, 793, 793, 794, 793, 794, + 793, 793, 793, 793, 793, 793, 794, 794, + 794, 794, 794, 793, 793, 794, 793, 793, + 793, 794, 793, 793, 794, 793, 793, 794, + 793, 793, 794, 793, 793, 793, 794, 794, + 794, 793, 793, 793, 794, 794, 794, 794, + 793, 1032, 845, 1033, 845, 1034, 1035, 1036, + 1037, 800, 793, 794, 794, 794, 794, 794, + 793, 793, 793, 794, 793, 793, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 793, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 793, 794, 794, 794, + 794, 794, 793, 1038, 845, 800, 793, 794, + 1039, 845, 830, 800, 793, 794, 1040, 793, + 800, 793, 794, 845, 1041, 800, 793, 793, + 794, 1042, 793, 845, 1043, 800, 793, 793, + 794, 1045, 1044, 794, 794, 794, 794, 1045, + 1044, 794, 1045, 1044, 1045, 1045, 794, 1045, + 1044, 794, 1045, 794, 1045, 1044, 794, 1045, + 794, 1045, 794, 1044, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1044, 794, 794, 1045, + 1045, 794, 1045, 794, 1045, 1044, 1045, 1045, + 1045, 1045, 1045, 794, 1045, 794, 1045, 794, + 1045, 1044, 1045, 1045, 794, 1045, 794, 1045, + 1044, 1045, 1045, 1045, 1045, 1045, 794, 1045, + 794, 1045, 1044, 794, 794, 1045, 794, 1045, + 1044, 1045, 1045, 1045, 794, 1045, 794, 1045, + 794, 1045, 794, 1045, 1044, 1045, 794, 1045, + 794, 1045, 1044, 794, 1045, 1045, 1045, 1045, + 794, 1045, 794, 1045, 794, 1045, 794, 1045, + 794, 1045, 794, 1045, 1044, 794, 1045, 1044, + 1045, 1045, 1045, 794, 1045, 794, 1045, 1044, + 1045, 794, 1045, 794, 1045, 1044, 794, 1045, + 1045, 1045, 1045, 794, 1045, 794, 1045, 1044, + 794, 1045, 794, 1045, 794, 1045, 1044, 1045, + 1045, 794, 1045, 794, 1045, 1044, 794, 1045, + 794, 1045, 794, 1045, 794, 1044, 1045, 1045, + 1045, 794, 1045, 794, 1045, 1044, 794, 1045, + 1044, 1045, 1045, 794, 1045, 1044, 1045, 1045, + 1045, 794, 1045, 1045, 1045, 1045, 1045, 1045, + 794, 794, 1045, 794, 1045, 794, 1045, 794, + 1045, 1044, 1045, 794, 1045, 794, 1045, 1044, + 794, 1045, 1044, 1045, 794, 1045, 1044, 1045, + 794, 1045, 1044, 794, 794, 1045, 1044, 794, + 1045, 794, 1045, 794, 1045, 794, 1045, 794, + 1045, 794, 1044, 1045, 1045, 794, 1045, 1045, + 1045, 1045, 794, 794, 1045, 1045, 1045, 1045, + 1045, 794, 1045, 1045, 1045, 1045, 1045, 1044, + 794, 1045, 1045, 794, 1045, 794, 1044, 1045, + 1045, 794, 1045, 1044, 794, 794, 1045, 794, + 1044, 1045, 1045, 1044, 794, 1045, 794, 1044, + 1045, 1044, 794, 1045, 794, 1045, 794, 1044, + 1045, 1045, 1044, 794, 1045, 794, 1045, 794, + 1045, 1044, 1045, 794, 1045, 794, 1045, 1044, + 794, 1045, 1044, 794, 794, 1045, 1044, 1045, + 794, 1044, 1045, 1044, 794, 1045, 794, 1045, + 794, 1044, 1045, 1044, 794, 794, 1045, 1044, + 1045, 794, 1045, 794, 1045, 1044, 794, 1045, + 794, 1044, 1045, 1044, 794, 794, 1045, 794, + 1044, 1045, 1044, 794, 794, 1045, 1044, 1045, + 794, 1045, 1044, 1045, 794, 1045, 1044, 1045, + 794, 1045, 794, 1045, 794, 1044, 1045, 1044, + 794, 794, 1045, 1044, 1045, 794, 1045, 794, + 1045, 1044, 794, 1045, 1044, 1045, 1045, 794, + 1045, 794, 1045, 1044, 794, 1044, 1044, 794, + 1045, 1045, 794, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1044, 794, 1045, 1045, 1045, 794, + 1044, 1045, 1045, 1045, 794, 1045, 794, 1045, + 794, 1045, 794, 1045, 794, 1045, 1044, 794, + 794, 1045, 1044, 1045, 794, 1045, 1044, 794, + 794, 1045, 794, 794, 794, 1045, 794, 1045, + 794, 1045, 794, 1045, 794, 1044, 794, 
1045, + 794, 1045, 794, 1044, 1045, 1044, 794, 1045, + 794, 1044, 1045, 794, 1045, 1045, 1045, 1044, + 794, 1045, 794, 794, 1045, 794, 1044, 1045, + 1045, 1044, 794, 1045, 1045, 1045, 1045, 794, + 1045, 794, 1044, 1045, 1045, 1045, 794, 1045, + 1044, 1045, 794, 1045, 794, 1045, 794, 1045, + 794, 1045, 1044, 1045, 1045, 794, 1045, 1044, + 794, 1045, 794, 1045, 794, 1044, 1045, 1045, + 1044, 794, 1045, 794, 1044, 1045, 1044, 794, + 1045, 1044, 794, 1045, 794, 1045, 1044, 1045, + 1045, 1045, 1044, 794, 794, 794, 1045, 1044, + 794, 1045, 794, 1044, 1045, 1044, 794, 1045, + 794, 1045, 794, 1044, 1045, 1045, 1045, 1044, + 794, 1045, 794, 1044, 1045, 1045, 1045, 1045, + 1044, 794, 1045, 794, 1045, 1044, 794, 794, + 1045, 794, 1045, 1044, 1045, 794, 1045, 794, + 1044, 1045, 1045, 1044, 794, 1045, 794, 1045, + 1044, 794, 1045, 1045, 1045, 794, 1045, 794, + 1044, 794, 1045, 1044, 1045, 794, 794, 1045, + 794, 1045, 794, 1044, 1045, 1045, 1045, 1045, + 1044, 794, 1045, 794, 1045, 794, 1045, 794, + 1045, 794, 1045, 1044, 1045, 1045, 1045, 794, + 1045, 794, 1045, 794, 1045, 794, 1044, 1045, + 1045, 794, 794, 1045, 1044, 1045, 794, 1045, + 1045, 1044, 794, 1045, 794, 1045, 1044, 794, + 794, 1045, 1045, 1045, 1045, 794, 1045, 794, + 1045, 794, 1044, 1045, 1045, 794, 1044, 1045, + 1044, 794, 1045, 794, 1044, 1045, 1044, 794, + 1045, 794, 1044, 1045, 794, 1045, 1045, 1044, + 794, 1045, 1045, 794, 1044, 1045, 1044, 794, + 1045, 794, 1045, 1044, 1045, 794, 1045, 794, + 1044, 1045, 1044, 794, 1045, 794, 1045, 794, + 1045, 794, 1045, 794, 1045, 1044, 1046, 1044, + 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, + 1055, 1056, 1057, 1049, 1058, 1059, 1060, 1061, + 1062, 1049, 1063, 1064, 1065, 1066, 1067, 1068, + 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, + 1077, 1049, 1078, 1046, 1058, 1046, 1079, 1046, + 1044, 1045, 1045, 1045, 1045, 794, 1044, 1045, + 1045, 1044, 794, 1045, 1044, 794, 794, 1045, + 1044, 794, 1045, 794, 1044, 1045, 1044, 794, + 794, 1045, 794, 1044, 1045, 1045, 1044, 794, + 1045, 1045, 1045, 1044, 794, 1045, 794, 1045, + 1045, 1044, 794, 794, 1045, 794, 1044, 1045, + 1044, 794, 1045, 1044, 794, 794, 1045, 794, + 1045, 1044, 794, 1045, 794, 794, 1045, 794, + 1045, 794, 1044, 1045, 1045, 1044, 794, 1045, + 1045, 794, 1045, 1044, 794, 1045, 794, 1045, + 1044, 794, 1045, 794, 1044, 794, 1045, 1045, + 1045, 794, 1045, 1044, 1045, 794, 1045, 1044, + 794, 1045, 1044, 1045, 794, 1045, 1044, 794, + 1045, 1044, 794, 1045, 794, 1045, 1044, 794, + 1045, 1044, 794, 1045, 1044, 1080, 1081, 1082, + 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, + 1091, 1051, 1092, 1093, 1094, 1095, 1096, 1093, + 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, + 1105, 1046, 1044, 1045, 794, 1045, 1044, 1045, + 794, 1045, 1044, 1045, 794, 1045, 1044, 1045, + 794, 1045, 1044, 794, 1045, 794, 1045, 1044, + 1045, 794, 1045, 1044, 1045, 794, 794, 794, + 1045, 1044, 1045, 794, 1045, 1044, 1045, 1045, + 1045, 1045, 794, 1045, 794, 1044, 1045, 1044, + 794, 794, 1045, 794, 1045, 1044, 1045, 794, + 1045, 1044, 794, 1045, 1044, 1045, 1045, 794, + 1045, 1044, 794, 1045, 1044, 1045, 794, 1045, + 1044, 794, 1045, 1044, 794, 1045, 1044, 794, + 1045, 1044, 1045, 1044, 794, 794, 1045, 1044, + 1045, 794, 1045, 1044, 794, 1045, 794, 1044, + 1045, 1044, 794, 1049, 1106, 1046, 1049, 1107, + 1049, 1108, 1058, 1046, 1044, 1045, 1044, 794, + 1045, 1044, 794, 1049, 1107, 1058, 1046, 1044, + 1049, 1109, 1046, 1058, 1046, 1044, 1045, 1044, + 794, 1049, 1110, 1067, 1111, 1093, 1112, 1105, + 1049, 1113, 1114, 1115, 1046, 1058, 1046, 1044, + 1045, 
1044, 794, 1045, 794, 1045, 1044, 794, + 1045, 794, 1045, 794, 1044, 1045, 1045, 1044, + 794, 1045, 794, 1045, 1044, 794, 1045, 1044, + 1049, 1058, 800, 1044, 1116, 1049, 1117, 1058, + 1046, 1044, 800, 1045, 1044, 794, 1045, 1044, + 794, 1118, 1049, 1119, 1120, 1046, 1044, 794, + 1045, 1044, 1045, 1045, 1044, 794, 794, 1045, + 794, 1045, 1044, 1049, 1121, 1122, 1123, 1124, + 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1046, + 1058, 1046, 1044, 1045, 794, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 794, 1045, 794, 1045, + 1045, 1045, 1045, 1045, 1045, 1044, 794, 1045, + 1045, 794, 1045, 794, 1044, 1045, 794, 1045, + 1045, 1045, 794, 1045, 1045, 794, 1045, 1045, + 794, 1045, 1045, 794, 1045, 1045, 1044, 794, + 1049, 1132, 1049, 1108, 1133, 1134, 1135, 1046, + 1058, 1046, 1044, 1045, 1044, 794, 1045, 1045, + 1045, 794, 1045, 1045, 1045, 794, 1045, 794, + 1045, 1044, 794, 794, 794, 794, 1045, 1045, + 794, 794, 794, 794, 794, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 794, 1045, 794, 1045, + 794, 1044, 1045, 1045, 1045, 794, 1045, 794, + 1045, 1044, 1058, 800, 1136, 1049, 1058, 800, + 1045, 1044, 794, 1137, 1049, 1138, 1058, 800, + 1045, 1044, 794, 1045, 794, 1139, 1058, 1046, + 1044, 800, 1045, 1044, 794, 1049, 1140, 1046, + 1058, 1046, 1044, 1045, 1044, 794, 1141, 1142, + 1143, 1141, 1144, 1145, 1146, 1148, 1149, 1150, + 1151, 1152, 1153, 671, 671, 421, 1154, 1155, + 1156, 1157, 671, 1160, 1161, 1163, 1164, 1165, + 1159, 1166, 1167, 1168, 1169, 1170, 1171, 1172, + 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, + 1181, 1182, 1183, 1184, 1185, 1187, 1188, 1189, + 1190, 1191, 1192, 671, 1147, 9, 1147, 421, + 1147, 421, 1159, 1162, 1186, 1193, 1158, 1141, + 1141, 1194, 1142, 1195, 1197, 1196, 2, 1, + 1198, 1196, 1199, 1196, 5, 1, 1196, 8, + 10, 10, 9, 1200, 1201, 1202, 1196, 1203, + 1204, 1196, 1205, 1196, 421, 421, 1207, 1208, + 490, 471, 1209, 471, 1210, 1211, 1212, 1213, + 1214, 1215, 1216, 1217, 1218, 1219, 1220, 545, + 1221, 521, 1222, 1223, 1224, 1225, 1226, 1227, + 1228, 1229, 1230, 1231, 1232, 1233, 421, 421, + 421, 426, 566, 1206, 1234, 1196, 1235, 1196, + 671, 1236, 421, 421, 421, 671, 1236, 671, + 671, 421, 1236, 421, 1236, 421, 1236, 421, + 671, 671, 671, 671, 671, 1236, 421, 671, + 671, 671, 421, 671, 421, 1236, 421, 671, + 671, 671, 671, 421, 1236, 671, 421, 671, + 421, 671, 421, 671, 671, 421, 671, 1236, + 421, 671, 421, 671, 421, 671, 1236, 671, + 421, 1236, 671, 421, 671, 421, 1236, 671, + 671, 671, 671, 671, 1236, 421, 421, 671, + 421, 671, 1236, 671, 421, 1236, 671, 671, + 1236, 421, 421, 671, 421, 671, 421, 671, + 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, + 1244, 1245, 1246, 1247, 716, 1248, 1249, 1250, + 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, + 1259, 1258, 1260, 1261, 1262, 1263, 1264, 672, + 1236, 1265, 1266, 1267, 1268, 1269, 1270, 1271, + 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, + 1280, 1281, 1282, 1283, 726, 1284, 1285, 1286, + 693, 1287, 1288, 1289, 1290, 1291, 1292, 672, + 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, + 675, 1301, 672, 675, 1302, 1303, 1304, 1305, + 684, 1236, 1306, 1307, 1308, 1309, 704, 1310, + 1311, 684, 1312, 1313, 1314, 1315, 1316, 672, + 1236, 1317, 1276, 1318, 1319, 1320, 684, 1321, + 1322, 675, 672, 684, 426, 1236, 1286, 672, + 675, 684, 426, 684, 426, 1323, 684, 1236, + 426, 675, 1324, 1325, 675, 1326, 1327, 682, + 1328, 1329, 1330, 1331, 1332, 1282, 1333, 1334, + 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, + 1343, 1344, 1301, 1345, 675, 684, 426, 1236, + 1346, 1347, 684, 672, 1236, 426, 672, 1236, + 675, 1348, 732, 
1349, 1350, 1351, 1352, 1353, + 1354, 1355, 1356, 672, 1357, 1358, 1359, 1360, + 1361, 1362, 672, 684, 1236, 1364, 1365, 1366, + 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, + 1370, 1376, 1377, 1378, 1379, 1363, 1375, 1363, + 1236, 1363, 1236, 1380, 1380, 1381, 1382, 1383, + 1384, 1385, 1386, 1387, 1388, 1385, 770, 1389, + 1389, 1389, 1390, 1391, 1384, 1389, 771, 772, + 1392, 1389, 770, 1393, 1393, 1393, 1395, 1396, + 1397, 1393, 1398, 1399, 1400, 1393, 1394, 1401, + 1401, 1401, 1403, 1404, 1405, 1401, 1406, 1407, + 1408, 1401, 1402, 1389, 1389, 1409, 1410, 1384, + 1389, 771, 772, 1392, 1389, 770, 1411, 1412, + 1413, 770, 1414, 1415, 1416, 768, 768, 768, + 768, 1418, 1419, 1420, 1394, 768, 1421, 1422, + 1423, 768, 1417, 769, 769, 769, 1425, 1426, + 1427, 1394, 769, 1428, 1429, 1430, 769, 1424, + 768, 768, 768, 1432, 1433, 1434, 1402, 768, + 1435, 1436, 1437, 768, 1431, 1393, 1393, 770, + 1438, 1439, 1397, 1393, 1398, 1399, 1400, 1393, + 1394, 1440, 1441, 1442, 770, 1443, 1444, 1445, + 769, 769, 769, 769, 1447, 1448, 1449, 1402, + 769, 1450, 1451, 1452, 769, 1446, 1401, 1401, + 770, 1453, 1454, 1405, 1401, 1406, 1407, 1408, + 1401, 1402, 1401, 1401, 1401, 1403, 1404, 1405, + 770, 1406, 1407, 1408, 1401, 1402, 1401, 1401, + 1401, 1403, 1404, 1405, 771, 1406, 1407, 1408, + 1401, 1402, 1401, 1401, 1401, 1403, 1404, 1405, + 772, 1406, 1407, 1408, 1401, 1402, 1393, 1393, + 1393, 1395, 1396, 1397, 770, 1398, 1399, 1400, + 1393, 1394, 1393, 1393, 1393, 1395, 1396, 1397, + 771, 1398, 1399, 1400, 1393, 1394, 1393, 1393, + 1393, 1395, 1396, 1397, 772, 1398, 1399, 1400, + 1393, 1394, 1456, 768, 1458, 1457, 1459, 769, + 1461, 1460, 770, 1462, 774, 1462, 1463, 1462, + 776, 1464, 1465, 1466, 1467, 1468, 1469, 1470, + 1467, 780, 776, 1464, 1472, 1473, 1471, 781, + 782, 1474, 1471, 780, 1477, 1478, 1479, 1480, + 1475, 1481, 1482, 1483, 1475, 1476, 1486, 1487, + 1488, 1489, 1484, 1490, 1491, 1492, 1484, 1485, + 1494, 1493, 1496, 1495, 780, 1497, 781, 1497, + 782, 1497, 786, 1498, 1499, 1500, 1501, 1502, + 1503, 1504, 1501, 788, 786, 1498, 1506, 1505, + 789, 790, 1507, 1505, 788, 1509, 1508, 1511, + 1510, 788, 1512, 789, 1512, 790, 1512, 794, + 1515, 1516, 1518, 1519, 1520, 1514, 1521, 1522, + 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, + 1531, 1532, 1533, 1534, 1535, 1536, 1537, 1538, + 1539, 1540, 1542, 1543, 1544, 1545, 1546, 1547, + 794, 794, 1513, 1514, 1517, 1541, 1548, 1513, + 1045, 794, 794, 1550, 1551, 864, 845, 1552, + 845, 1553, 1554, 1555, 1556, 1557, 1558, 1559, + 1560, 1561, 1562, 1563, 919, 1564, 895, 1565, + 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, + 1574, 1575, 1576, 794, 794, 794, 800, 940, + 1549, 1045, 1577, 794, 794, 794, 1045, 1577, + 1045, 1045, 794, 1577, 794, 1577, 794, 1577, + 794, 1045, 1045, 1045, 1045, 1045, 1577, 794, + 1045, 1045, 1045, 794, 1045, 794, 1577, 794, + 1045, 1045, 1045, 1045, 794, 1577, 1045, 794, + 1045, 794, 1045, 794, 1045, 1045, 794, 1045, + 1577, 794, 1045, 794, 1045, 794, 1045, 1577, + 1045, 794, 1577, 1045, 794, 1045, 794, 1577, + 1045, 1045, 1045, 1045, 1045, 1577, 794, 794, + 1045, 794, 1045, 1577, 1045, 794, 1577, 1045, + 1045, 1577, 794, 794, 1045, 794, 1045, 794, + 1045, 1577, 1578, 1579, 1580, 1581, 1582, 1583, + 1584, 1585, 1586, 1587, 1588, 1090, 1589, 1590, + 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, + 1599, 1600, 1599, 1601, 1602, 1603, 1604, 1605, + 1046, 1577, 1606, 1607, 1608, 1609, 1610, 1611, + 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, + 1620, 1621, 1622, 1623, 1624, 1100, 1625, 1626, + 1627, 1067, 1628, 1629, 1630, 
1631, 1632, 1633, + 1046, 1634, 1635, 1636, 1637, 1638, 1639, 1640, + 1641, 1049, 1642, 1046, 1049, 1643, 1644, 1645, + 1646, 1058, 1577, 1647, 1648, 1649, 1650, 1078, + 1651, 1652, 1058, 1653, 1654, 1655, 1656, 1657, + 1046, 1577, 1658, 1617, 1659, 1660, 1661, 1058, + 1662, 1663, 1049, 1046, 1058, 800, 1577, 1627, + 1046, 1049, 1058, 800, 1058, 800, 1664, 1058, + 1577, 800, 1049, 1665, 1666, 1049, 1667, 1668, + 1056, 1669, 1670, 1671, 1672, 1673, 1623, 1674, + 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, + 1683, 1684, 1685, 1642, 1686, 1049, 1058, 800, + 1577, 1687, 1688, 1058, 1046, 1577, 800, 1046, + 1577, 1049, 1689, 1106, 1690, 1691, 1692, 1693, + 1694, 1695, 1696, 1697, 1046, 1698, 1699, 1700, + 1701, 1702, 1703, 1046, 1058, 1577, 1705, 1706, + 1707, 1708, 1709, 1710, 1711, 1712, 1713, 1714, + 1715, 1711, 1717, 1718, 1719, 1720, 1704, 1716, + 1704, 1577, 1704, 1577, } var _hcltok_trans_targs []int16 = []int16{ - 1462, 1, 1462, 1462, 1462, 3, 4, 1470, - 1462, 5, 1471, 6, 7, 9, 10, 287, - 13, 14, 15, 16, 17, 288, 289, 20, - 290, 22, 23, 291, 292, 293, 294, 295, - 296, 297, 298, 299, 300, 329, 349, 354, - 128, 129, 130, 357, 152, 372, 376, 1462, - 11, 12, 18, 19, 21, 24, 25, 26, - 27, 28, 29, 30, 31, 32, 33, 65, - 106, 121, 132, 155, 171, 284, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 66, 67, 68, - 69, 70, 71, 72, 73, 74, 75, 76, - 77, 78, 79, 80, 81, 82, 83, 84, - 85, 86, 87, 88, 89, 90, 91, 92, - 93, 94, 95, 96, 97, 98, 99, 100, - 101, 102, 103, 104, 105, 107, 108, 109, - 110, 111, 112, 113, 114, 115, 116, 117, - 118, 119, 120, 122, 123, 124, 125, 126, - 127, 131, 133, 134, 135, 136, 137, 138, - 139, 140, 141, 142, 143, 144, 145, 146, - 147, 148, 149, 150, 151, 153, 154, 156, - 157, 158, 159, 160, 161, 162, 163, 164, - 165, 166, 167, 168, 169, 170, 172, 204, - 228, 231, 232, 234, 243, 244, 247, 251, - 269, 276, 278, 280, 282, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, - 200, 201, 202, 203, 205, 206, 207, 208, - 209, 210, 211, 212, 213, 214, 215, 216, - 217, 218, 219, 220, 221, 222, 223, 224, - 225, 226, 227, 229, 230, 233, 235, 236, - 237, 238, 239, 240, 241, 242, 245, 246, - 248, 249, 250, 252, 253, 254, 255, 256, - 257, 258, 259, 260, 261, 262, 263, 264, - 265, 266, 267, 268, 270, 271, 272, 273, - 274, 275, 277, 279, 281, 283, 285, 286, - 301, 302, 303, 304, 305, 306, 307, 308, - 309, 310, 311, 312, 313, 314, 315, 316, - 317, 318, 319, 320, 321, 322, 323, 324, - 325, 326, 327, 328, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, - 342, 343, 344, 345, 346, 347, 348, 350, - 351, 352, 353, 355, 356, 358, 359, 360, - 361, 362, 363, 364, 365, 366, 367, 368, - 369, 370, 371, 373, 374, 375, 377, 383, - 405, 410, 412, 414, 378, 379, 380, 381, - 382, 384, 385, 386, 387, 388, 389, 390, - 391, 392, 393, 394, 395, 396, 397, 398, - 399, 400, 401, 402, 403, 404, 406, 407, - 408, 409, 411, 413, 415, 1462, 1475, 438, - 439, 440, 441, 418, 442, 443, 444, 445, - 446, 447, 448, 449, 450, 451, 452, 453, - 454, 455, 456, 457, 458, 459, 460, 461, - 462, 463, 464, 465, 466, 467, 468, 470, - 471, 472, 473, 474, 475, 476, 477, 478, - 479, 480, 481, 482, 483, 484, 485, 486, - 420, 487, 488, 489, 490, 491, 492, 493, - 494, 495, 496, 497, 498, 499, 500, 501, - 502, 503, 504, 419, 505, 506, 507, 508, - 509, 511, 512, 513, 514, 515, 516, 517, - 518, 519, 520, 521, 522, 523, 524, 526, - 527, 528, 529, 530, 531, 
535, 537, 538, - 539, 540, 435, 541, 542, 543, 544, 545, - 546, 547, 548, 549, 550, 551, 552, 553, - 554, 555, 557, 558, 560, 561, 562, 563, - 564, 565, 433, 566, 567, 568, 569, 570, - 571, 572, 573, 574, 576, 608, 632, 635, - 636, 638, 647, 648, 651, 655, 673, 533, - 680, 682, 684, 686, 577, 578, 579, 580, - 581, 582, 583, 584, 585, 586, 587, 588, - 589, 590, 591, 592, 593, 594, 595, 596, - 597, 598, 599, 600, 601, 602, 603, 604, - 605, 606, 607, 609, 610, 611, 612, 613, - 614, 615, 616, 617, 618, 619, 620, 621, - 622, 623, 624, 625, 626, 627, 628, 629, - 630, 631, 633, 634, 637, 639, 640, 641, - 642, 643, 644, 645, 646, 649, 650, 652, - 653, 654, 656, 657, 658, 659, 660, 661, - 662, 663, 664, 665, 666, 667, 668, 669, - 670, 671, 672, 674, 675, 676, 677, 678, - 679, 681, 683, 685, 687, 689, 690, 1462, - 1462, 691, 828, 829, 760, 830, 831, 832, - 833, 834, 835, 789, 836, 725, 837, 838, - 839, 840, 841, 842, 843, 844, 745, 845, - 846, 847, 848, 849, 850, 851, 852, 853, - 854, 770, 855, 857, 858, 859, 860, 861, - 862, 863, 864, 865, 866, 703, 867, 868, - 869, 870, 871, 872, 873, 874, 875, 741, - 876, 877, 878, 879, 880, 811, 882, 883, - 886, 888, 889, 890, 891, 892, 893, 896, - 897, 899, 900, 901, 903, 904, 905, 906, - 907, 908, 909, 910, 911, 912, 913, 915, - 916, 917, 918, 921, 923, 924, 926, 928, - 1513, 1514, 930, 931, 1513, 933, 1527, 1527, - 1527, 1528, 937, 938, 1529, 1530, 1534, 1534, - 1534, 1535, 944, 945, 1536, 1537, 1541, 1542, - 1541, 971, 972, 973, 974, 951, 975, 976, - 977, 978, 979, 980, 981, 982, 983, 984, - 985, 986, 987, 988, 989, 990, 991, 992, - 993, 994, 995, 996, 997, 998, 999, 1000, - 1001, 1003, 1004, 1005, 1006, 1007, 1008, 1009, - 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, - 1018, 1019, 953, 1020, 1021, 1022, 1023, 1024, - 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, - 1033, 1034, 1035, 1036, 1037, 952, 1038, 1039, - 1040, 1041, 1042, 1044, 1045, 1046, 1047, 1048, - 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, - 1057, 1059, 1060, 1061, 1062, 1063, 1064, 1068, - 1070, 1071, 1072, 1073, 968, 1074, 1075, 1076, - 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, - 1085, 1086, 1087, 1088, 1090, 1091, 1093, 1094, - 1095, 1096, 1097, 1098, 966, 1099, 1100, 1101, - 1102, 1103, 1104, 1105, 1106, 1107, 1109, 1141, - 1165, 1168, 1169, 1171, 1180, 1181, 1184, 1188, - 1206, 1066, 1213, 1215, 1217, 1219, 1110, 1111, - 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, - 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, - 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, - 1136, 1137, 1138, 1139, 1140, 1142, 1143, 1144, - 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, - 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, - 1161, 1162, 1163, 1164, 1166, 1167, 1170, 1172, - 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1182, - 1183, 1185, 1186, 1187, 1189, 1190, 1191, 1192, - 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, - 1201, 1202, 1203, 1204, 1205, 1207, 1208, 1209, - 1210, 1211, 1212, 1214, 1216, 1218, 1220, 1222, - 1223, 1541, 1541, 1224, 1361, 1362, 1293, 1363, - 1364, 1365, 1366, 1367, 1368, 1322, 1369, 1258, - 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, - 1278, 1378, 1379, 1380, 1381, 1382, 1383, 1384, - 1385, 1386, 1387, 1303, 1388, 1390, 1391, 1392, - 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1236, - 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, - 1408, 1274, 1409, 1410, 1411, 1412, 1413, 1344, - 1415, 1416, 1419, 1421, 1422, 1423, 1424, 1425, - 1426, 1429, 1430, 1432, 1433, 1434, 1436, 1437, - 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, - 1446, 1448, 1449, 1450, 
1451, 1454, 1456, 1457, - 1459, 1461, 1463, 1462, 1464, 1465, 1462, 1466, - 1467, 1462, 1468, 1469, 1472, 1473, 1474, 1462, - 1476, 1462, 1477, 1462, 1478, 1479, 1480, 1481, - 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, - 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, - 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, - 1506, 1507, 1508, 1509, 1510, 1511, 1512, 1462, - 1462, 1462, 1462, 1462, 2, 1462, 1462, 8, - 1462, 1462, 1462, 1462, 1462, 416, 417, 421, - 422, 423, 424, 425, 426, 427, 428, 429, - 430, 431, 432, 434, 436, 437, 469, 510, - 525, 532, 534, 536, 556, 559, 575, 688, - 1462, 1462, 1462, 692, 693, 694, 695, 696, - 697, 698, 699, 700, 701, 702, 704, 705, - 706, 707, 708, 709, 710, 711, 712, 713, - 714, 715, 716, 717, 718, 719, 720, 721, - 722, 723, 724, 726, 727, 728, 729, 730, - 731, 732, 733, 734, 735, 736, 737, 738, - 739, 740, 742, 743, 744, 746, 747, 748, - 749, 750, 751, 752, 753, 754, 755, 756, - 757, 758, 759, 761, 762, 763, 764, 765, - 766, 767, 768, 769, 771, 772, 773, 774, - 775, 776, 777, 778, 779, 780, 781, 782, - 783, 784, 785, 786, 787, 788, 790, 791, - 792, 793, 794, 795, 796, 797, 798, 799, - 800, 801, 802, 803, 804, 805, 806, 807, - 808, 809, 810, 812, 813, 814, 815, 816, - 817, 818, 819, 820, 821, 822, 823, 824, - 825, 826, 827, 856, 881, 884, 885, 887, - 894, 895, 898, 902, 914, 919, 920, 922, - 925, 927, 1513, 1513, 1520, 1522, 1515, 1513, - 1524, 1525, 1526, 1513, 929, 932, 1516, 1517, - 1518, 1519, 1513, 1521, 1513, 1513, 1523, 1513, - 1513, 1513, 934, 935, 940, 941, 1527, 1531, - 1532, 1533, 1527, 936, 939, 1527, 1527, 1527, - 1527, 1527, 942, 947, 948, 1534, 1538, 1539, - 1540, 1534, 943, 946, 1534, 1534, 1534, 1534, - 1534, 1541, 1543, 1544, 1545, 1546, 1547, 1548, - 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, - 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, - 1565, 1566, 1567, 1568, 1569, 1570, 1571, 1572, - 1573, 1574, 1575, 1576, 1577, 1541, 949, 950, - 954, 955, 956, 957, 958, 959, 960, 961, - 962, 963, 964, 965, 967, 969, 970, 1002, - 1043, 1058, 1065, 1067, 1069, 1089, 1092, 1108, - 1221, 1541, 1225, 1226, 1227, 1228, 1229, 1230, - 1231, 1232, 1233, 1234, 1235, 1237, 1238, 1239, - 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, - 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, - 1256, 1257, 1259, 1260, 1261, 1262, 1263, 1264, - 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, - 1273, 1275, 1276, 1277, 1279, 1280, 1281, 1282, - 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, - 1291, 1292, 1294, 1295, 1296, 1297, 1298, 1299, - 1300, 1301, 1302, 1304, 1305, 1306, 1307, 1308, - 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, - 1317, 1318, 1319, 1320, 1321, 1323, 1324, 1325, - 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, - 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, - 1342, 1343, 1345, 1346, 1347, 1348, 1349, 1350, - 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, - 1359, 1360, 1389, 1414, 1417, 1418, 1420, 1427, - 1428, 1431, 1435, 1447, 1452, 1453, 1455, 1458, - 1460, + 1464, 1, 1464, 1464, 1464, 3, 4, 1464, + 5, 1472, 6, 7, 9, 10, 287, 13, + 14, 15, 16, 17, 288, 289, 20, 290, + 22, 23, 291, 292, 293, 294, 295, 296, + 297, 298, 299, 300, 329, 349, 354, 128, + 129, 130, 357, 152, 372, 376, 1464, 11, + 12, 18, 19, 21, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 65, 106, + 121, 132, 155, 171, 284, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 
93, + 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 122, 123, 124, 125, 126, 127, + 131, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 153, 154, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 172, 204, 228, + 231, 232, 234, 243, 244, 247, 251, 269, + 276, 278, 280, 282, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 229, 230, 233, 235, 236, 237, + 238, 239, 240, 241, 242, 245, 246, 248, + 249, 250, 252, 253, 254, 255, 256, 257, + 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 270, 271, 272, 273, 274, + 275, 277, 279, 281, 283, 285, 286, 301, + 302, 303, 304, 305, 306, 307, 308, 309, + 310, 311, 312, 313, 314, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, + 326, 327, 328, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 340, 341, 342, + 343, 344, 345, 346, 347, 348, 350, 351, + 352, 353, 355, 356, 358, 359, 360, 361, + 362, 363, 364, 365, 366, 367, 368, 369, + 370, 371, 373, 374, 375, 377, 383, 405, + 410, 412, 414, 378, 379, 380, 381, 382, + 384, 385, 386, 387, 388, 389, 390, 391, + 392, 393, 394, 395, 396, 397, 398, 399, + 400, 401, 402, 403, 404, 406, 407, 408, + 409, 411, 413, 415, 1464, 1476, 438, 439, + 440, 441, 418, 442, 443, 444, 445, 446, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 458, 459, 460, 461, 462, + 463, 464, 465, 466, 467, 468, 470, 471, + 472, 473, 474, 475, 476, 477, 478, 479, + 480, 481, 482, 483, 484, 485, 486, 420, + 487, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 504, 419, 505, 506, 507, 508, 509, + 511, 512, 513, 514, 515, 516, 517, 518, + 519, 520, 521, 522, 523, 524, 526, 527, + 528, 529, 530, 531, 535, 537, 538, 539, + 540, 435, 541, 542, 543, 544, 545, 546, + 547, 548, 549, 550, 551, 552, 553, 554, + 555, 557, 558, 560, 561, 562, 563, 564, + 565, 433, 566, 567, 568, 569, 570, 571, + 572, 573, 574, 576, 608, 632, 635, 636, + 638, 647, 648, 651, 655, 673, 533, 680, + 682, 684, 686, 577, 578, 579, 580, 581, + 582, 583, 584, 585, 586, 587, 588, 589, + 590, 591, 592, 593, 594, 595, 596, 597, + 598, 599, 600, 601, 602, 603, 604, 605, + 606, 607, 609, 610, 611, 612, 613, 614, + 615, 616, 617, 618, 619, 620, 621, 622, + 623, 624, 625, 626, 627, 628, 629, 630, + 631, 633, 634, 637, 639, 640, 641, 642, + 643, 644, 645, 646, 649, 650, 652, 653, + 654, 656, 657, 658, 659, 660, 661, 662, + 663, 664, 665, 666, 667, 668, 669, 670, + 671, 672, 674, 675, 676, 677, 678, 679, + 681, 683, 685, 687, 689, 690, 1464, 1464, + 691, 828, 829, 760, 830, 831, 832, 833, + 834, 835, 789, 836, 725, 837, 838, 839, + 840, 841, 842, 843, 844, 745, 845, 846, + 847, 848, 849, 850, 851, 852, 853, 854, + 770, 855, 857, 858, 859, 860, 861, 862, + 863, 864, 865, 866, 703, 867, 868, 869, + 870, 871, 872, 873, 874, 875, 741, 876, + 877, 878, 879, 880, 811, 882, 883, 886, + 888, 889, 890, 891, 892, 893, 896, 897, + 899, 900, 901, 903, 904, 905, 906, 907, + 908, 909, 910, 911, 912, 913, 915, 916, + 917, 918, 921, 923, 924, 926, 928, 1514, + 1516, 1517, 1515, 931, 932, 1514, 934, 1540, + 1540, 1540, 1542, 1543, 1541, 939, 940, 1544, + 1545, 1549, 1549, 1549, 1550, 946, 947, 1551, + 1552, 1556, 1557, 1556, 973, 974, 975, 976, + 
953, 977, 978, 979, 980, 981, 982, 983, + 984, 985, 986, 987, 988, 989, 990, 991, + 992, 993, 994, 995, 996, 997, 998, 999, + 1000, 1001, 1002, 1003, 1005, 1006, 1007, 1008, + 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, + 1017, 1018, 1019, 1020, 1021, 955, 1022, 1023, + 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, + 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, + 954, 1040, 1041, 1042, 1043, 1044, 1046, 1047, + 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, + 1056, 1057, 1058, 1059, 1061, 1062, 1063, 1064, + 1065, 1066, 1070, 1072, 1073, 1074, 1075, 970, + 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, + 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1092, + 1093, 1095, 1096, 1097, 1098, 1099, 1100, 968, + 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, + 1109, 1111, 1143, 1167, 1170, 1171, 1173, 1182, + 1183, 1186, 1190, 1208, 1068, 1215, 1217, 1219, + 1221, 1112, 1113, 1114, 1115, 1116, 1117, 1118, + 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, + 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, + 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, + 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, + 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, + 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1168, + 1169, 1172, 1174, 1175, 1176, 1177, 1178, 1179, + 1180, 1181, 1184, 1185, 1187, 1188, 1189, 1191, + 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, + 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, + 1209, 1210, 1211, 1212, 1213, 1214, 1216, 1218, + 1220, 1222, 1224, 1225, 1556, 1556, 1226, 1363, + 1364, 1295, 1365, 1366, 1367, 1368, 1369, 1370, + 1324, 1371, 1260, 1372, 1373, 1374, 1375, 1376, + 1377, 1378, 1379, 1280, 1380, 1381, 1382, 1383, + 1384, 1385, 1386, 1387, 1388, 1389, 1305, 1390, + 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, + 1400, 1401, 1238, 1402, 1403, 1404, 1405, 1406, + 1407, 1408, 1409, 1410, 1276, 1411, 1412, 1413, + 1414, 1415, 1346, 1417, 1418, 1421, 1423, 1424, + 1425, 1426, 1427, 1428, 1431, 1432, 1434, 1435, + 1436, 1438, 1439, 1440, 1441, 1442, 1443, 1444, + 1445, 1446, 1447, 1448, 1450, 1451, 1452, 1453, + 1456, 1458, 1459, 1461, 1463, 1465, 1464, 1466, + 1467, 1464, 1468, 1464, 1469, 1470, 1471, 1473, + 1474, 1475, 1464, 1477, 1464, 1478, 1464, 1479, + 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, + 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, + 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, + 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, + 1512, 1513, 1464, 1464, 1464, 1464, 1464, 2, + 1464, 8, 1464, 1464, 1464, 1464, 1464, 416, + 417, 421, 422, 423, 424, 425, 426, 427, + 428, 429, 430, 431, 432, 434, 436, 437, + 469, 510, 525, 532, 534, 536, 556, 559, + 575, 688, 1464, 1464, 1464, 692, 693, 694, + 695, 696, 697, 698, 699, 700, 701, 702, + 704, 705, 706, 707, 708, 709, 710, 711, + 712, 713, 714, 715, 716, 717, 718, 719, + 720, 721, 722, 723, 724, 726, 727, 728, + 729, 730, 731, 732, 733, 734, 735, 736, + 737, 738, 739, 740, 742, 743, 744, 746, + 747, 748, 749, 750, 751, 752, 753, 754, + 755, 756, 757, 758, 759, 761, 762, 763, + 764, 765, 766, 767, 768, 769, 771, 772, + 773, 774, 775, 776, 777, 778, 779, 780, + 781, 782, 783, 784, 785, 786, 787, 788, + 790, 791, 792, 793, 794, 795, 796, 797, + 798, 799, 800, 801, 802, 803, 804, 805, + 806, 807, 808, 809, 810, 812, 813, 814, + 815, 816, 817, 818, 819, 820, 821, 822, + 823, 824, 825, 826, 827, 856, 881, 884, + 885, 887, 894, 895, 898, 902, 914, 919, + 920, 922, 925, 927, 1514, 1514, 1533, 1535, + 1518, 1514, 1537, 1538, 1539, 1514, 929, 930, + 933, 1514, 1515, 929, 930, 1518, 931, 932, + 933, 1514, 
1515, 929, 930, 1518, 931, 932, + 933, 1519, 1524, 1520, 1521, 1523, 1530, 1531, + 1532, 1516, 1520, 1521, 1523, 1530, 1531, 1532, + 1517, 1522, 1525, 1526, 1527, 1528, 1529, 1516, + 1520, 1521, 1523, 1530, 1531, 1532, 1519, 1524, + 1522, 1525, 1526, 1527, 1528, 1529, 1517, 1522, + 1525, 1526, 1527, 1528, 1529, 1519, 1524, 1514, + 1534, 1514, 1514, 1536, 1514, 1514, 1514, 935, + 936, 942, 943, 1540, 1546, 1547, 1548, 1540, + 937, 938, 941, 1540, 1541, 1540, 936, 937, + 938, 939, 940, 941, 1540, 1541, 1540, 936, + 937, 938, 939, 940, 941, 1540, 1540, 1540, + 1540, 1540, 944, 949, 950, 1549, 1553, 1554, + 1555, 1549, 945, 948, 1549, 1549, 1549, 1549, + 1549, 1556, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, + 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, + 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, + 1588, 1589, 1590, 1591, 1592, 1556, 951, 952, + 956, 957, 958, 959, 960, 961, 962, 963, + 964, 965, 966, 967, 969, 971, 972, 1004, + 1045, 1060, 1067, 1069, 1071, 1091, 1094, 1110, + 1223, 1556, 1227, 1228, 1229, 1230, 1231, 1232, + 1233, 1234, 1235, 1236, 1237, 1239, 1240, 1241, + 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, + 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, + 1258, 1259, 1261, 1262, 1263, 1264, 1265, 1266, + 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, + 1275, 1277, 1278, 1279, 1281, 1282, 1283, 1284, + 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, + 1293, 1294, 1296, 1297, 1298, 1299, 1300, 1301, + 1302, 1303, 1304, 1306, 1307, 1308, 1309, 1310, + 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, + 1319, 1320, 1321, 1322, 1323, 1325, 1326, 1327, + 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, + 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, + 1344, 1345, 1347, 1348, 1349, 1350, 1351, 1352, + 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, + 1361, 1362, 1391, 1416, 1419, 1420, 1422, 1429, + 1430, 1433, 1437, 1449, 1454, 1455, 1457, 1460, + 1462, } var _hcltok_trans_actions []byte = []byte{ - 143, 0, 85, 139, 101, 0, 0, 169, - 135, 0, 5, 0, 0, 0, 0, 0, + 149, 0, 93, 145, 109, 0, 0, 141, + 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 115, + 0, 0, 0, 0, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3444,7 +3516,7 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 137, 166, 0, + 0, 0, 0, 0, 143, 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3475,8 +3547,7 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 141, - 119, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 147, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3488,10 +3559,11 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 27, 5, 0, 0, 29, 0, 49, 35, - 47, 148, 0, 0, 0, 0, 69, 55, - 67, 154, 0, 0, 0, 0, 79, 160, - 83, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 35, + 13, 13, 13, 0, 0, 37, 0, 57, + 43, 55, 178, 178, 178, 0, 0, 0, + 0, 77, 63, 75, 184, 0, 0, 0, + 0, 87, 190, 91, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3522,7 +3594,7 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 81, 73, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 89, 81, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3534,19 +3606,19 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 87, 0, 0, 113, 178, - 0, 105, 5, 172, 5, 0, 0, 107, - 0, 109, 0, 117, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 95, 0, + 0, 121, 205, 113, 0, 13, 199, 13, + 0, 0, 115, 0, 117, 0, 125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 5, 5, 5, 175, 175, 175, - 175, 175, 175, 5, 5, 175, 5, 121, - 133, 129, 91, 97, 0, 127, 123, 0, - 95, 89, 103, 93, 125, 0, 0, 0, + 0, 0, 0, 0, 0, 13, 13, 13, + 202, 202, 202, 202, 202, 202, 13, 13, + 202, 13, 129, 139, 135, 99, 105, 0, + 131, 0, 103, 97, 111, 101, 133, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 99, 111, 131, 0, 0, 0, 0, 0, + 0, 0, 107, 119, 137, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3564,22 +3636,32 @@ var _hcltok_trans_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 13, 11, 0, 0, 5, 15, - 0, 5, 5, 21, 0, 0, 0, 5, - 5, 5, 23, 0, 17, 7, 0, 19, - 9, 25, 0, 0, 0, 0, 37, 0, - 151, 151, 43, 0, 0, 39, 31, 41, - 33, 45, 0, 0, 0, 57, 0, 157, - 157, 63, 0, 0, 59, 51, 61, 53, - 65, 71, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 21, 19, 0, 0, + 13, 23, 0, 13, 13, 29, 0, 0, + 0, 151, 172, 1, 1, 172, 1, 1, + 1, 154, 175, 3, 3, 175, 3, 3, + 3, 0, 0, 0, 0, 13, 13, 13, + 13, 172, 1, 1, 172, 172, 172, 172, + 172, 1, 1, 172, 172, 172, 172, 175, + 3, 3, 175, 175, 175, 175, 1, 1, + 0, 0, 13, 13, 13, 13, 175, 3, + 3, 175, 175, 175, 175, 3, 3, 31, + 0, 25, 15, 0, 27, 17, 33, 0, + 0, 0, 0, 45, 0, 181, 181, 51, + 0, 0, 0, 160, 208, 157, 5, 5, + 5, 5, 5, 5, 166, 212, 163, 7, + 7, 7, 7, 7, 7, 47, 39, 49, + 41, 53, 0, 0, 0, 65, 0, 187, + 187, 71, 0, 0, 67, 59, 69, 61, + 73, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 5, 5, 5, 163, 163, 163, 163, 163, - 163, 5, 5, 163, 5, 75, 0, 0, + 13, 13, 13, 193, 193, 193, 193, 193, + 193, 13, 13, 193, 13, 83, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 77, 0, 0, 0, 0, 0, 0, + 0, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3783,22 +3865,24 @@ var _hcltok_to_state_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 145, - 0, 0, 0, 0, 0, 0, 145, 0, - 0, 0, 0, 0, 0, 1, 0, 0, + 0, 0, 9, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 169, 0, 0, 0, + 0, 0, 0, 0, 0, 169, 0, 0, + 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, } var _hcltok_from_state_actions []byte = []byte{ @@ -3984,26 +4068,28 @@ var _hcltok_from_state_actions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 3, - 0, 0, 0, 0, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 3, 0, 0, + 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 11, 0, 0, 0, + 0, 0, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, } var _hcltok_eof_trans []int16 = []int16{ - 0, 1, 4, 1, 1, 9, 9, 9, + 0, 1, 4, 4, 4, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, @@ -4055,174 +4141,181 @@ var _hcltok_eof_trans []int16 = []int16{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 422, 422, 1, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 672, 672, 672, 672, 672, 672, 672, - 672, 769, 769, 769, 769, 773, 773, 775, - 777, 775, 775, 777, 0, 0, 783, 785, - 783, 783, 785, 0, 0, 791, 791, 793, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 
791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 791, 791, 791, 791, 791, 791, 791, 791, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, - 1042, 1042, 1042, 1042, 1042, 1042, 0, 1192, - 1193, 1194, 1193, 1194, 1194, 1194, 1198, 1199, - 1194, 1194, 1194, 1205, 1194, 1194, 1235, 1235, - 1235, 1235, 1235, 1235, 1235, 1235, 1235, 1235, - 1235, 1235, 1235, 1235, 1235, 1235, 1235, 1235, - 1235, 1235, 1235, 1235, 1235, 1235, 1235, 1235, - 1235, 1235, 1235, 1235, 1235, 1235, 1235, 1235, - 1235, 0, 1388, 1388, 1388, 1388, 1388, 1388, - 1395, 1397, 1395, 1400, 1402, 1402, 1402, 0, - 1411, 1414, 1416, 1418, 1418, 1418, 0, 1426, - 1429, 1431, 1433, 1433, 1433, 0, 1470, 1498, - 1498, 1498, 1498, 1498, 1498, 1498, 1498, 1498, - 1498, 1498, 1498, 1498, 1498, 1498, 1498, 1498, - 1498, 1498, 1498, 1498, 1498, 1498, 1498, 1498, - 1498, 1498, 1498, 1498, 1498, 1498, 1498, 1498, - 1498, 1498, + 421, 421, 1, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 
421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 421, 421, 421, 421, 421, + 421, 421, 421, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 671, 671, 671, 671, 671, 671, 671, + 671, 768, 768, 768, 768, 768, 774, 774, + 776, 778, 778, 776, 776, 778, 0, 0, + 786, 788, 786, 786, 788, 0, 0, 794, + 794, 796, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 
794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 794, 794, 794, 794, 794, 794, + 794, 794, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, + 0, 1195, 1196, 1197, 1196, 1197, 1197, 1197, + 1201, 1197, 1197, 1197, 1207, 1197, 1197, 1237, + 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, + 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, + 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, + 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, + 1237, 1237, 0, 1390, 1394, 1402, 1390, 1390, + 1394, 1394, 1402, 1394, 1390, 1402, 1402, 1402, + 1402, 1402, 1394, 1394, 1394, 1456, 1458, 1456, + 1461, 1463, 1463, 1463, 0, 1472, 1476, 1485, + 1494, 1496, 1498, 1498, 1498, 0, 1506, 1509, + 1511, 1513, 1513, 1513, 0, 1550, 1578, 1578, + 1578, 1578, 1578, 1578, 1578, 1578, 1578, 1578, + 1578, 1578, 1578, 1578, 1578, 1578, 1578, 1578, + 1578, 1578, 1578, 1578, 1578, 1578, 1578, 1578, + 1578, 1578, 1578, 1578, 1578, 1578, 1578, 1578, + 1578, } -const hcltok_start int = 1462 -const hcltok_first_final int = 1462 +const hcltok_start int = 1464 +const hcltok_first_final int = 1464 const hcltok_error int = 0 -const hcltok_en_stringTemplate int = 1513 -const hcltok_en_heredocTemplate int = 1527 -const hcltok_en_bareTemplate int = 1534 -const hcltok_en_identOnly int = 1541 -const hcltok_en_main int = 1462 +const hcltok_en_stringTemplate int = 1514 +const hcltok_en_heredocTemplate int = 1540 +const hcltok_en_bareTemplate int = 1549 +const hcltok_en_identOnly int = 1556 +const hcltok_en_main int = 1464 -// line 16 "scan_tokens.rl" +//line scan_tokens.rl:16 func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + stripData := stripUTF8BOM(data) + 
start.Byte += len(data) - len(stripData) + data = stripData + f := &tokenAccum{ - Filename: filename, - Bytes: data, - Pos: start, + Filename: filename, + Bytes: data, + Pos: start, + StartByte: start.Byte, } - // line 276 "scan_tokens.rl" +//line scan_tokens.rl:299 // Ragel state p := 0 // "Pointer" into data @@ -4250,7 +4343,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To var retBraces []int // stack of brace levels that cause us to use fret var heredocs []heredocInProgress // stack of heredocs we're currently processing - // line 311 "scan_tokens.rl" +//line scan_tokens.rl:334 // Make Go compiler happy _ = ts @@ -4270,7 +4363,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To f.emitToken(TokenType(b[0]), ts, te) } - // line 4282 "scan_tokens.go" +//line scan_tokens.go:4375 { top = 0 ts = 0 @@ -4278,7 +4371,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To act = 0 } - // line 4290 "scan_tokens.go" +//line scan_tokens.go:4383 { var _klen int var _trans int @@ -4298,12 +4391,11 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To for ; _nacts > 0; _nacts-- { _acts++ switch _hcltok_actions[_acts-1] { - case 2: - // line 1 "NONE" - + case 6: +//line NONE:1 ts = p - // line 4314 "scan_tokens.go" +//line scan_tokens.go:4406 } } @@ -4374,14 +4466,28 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To for ; _nacts > 0; _nacts-- { _acts++ switch _hcltok_actions[_acts-1] { + case 0: +//line scan_tokens.rl:223 + p-- + + case 1: +//line scan_tokens.rl:224 + p-- + + case 2: +//line scan_tokens.rl:229 + p-- + case 3: - // line 1 "NONE" +//line scan_tokens.rl:230 + p-- + case 7: +//line NONE:1 te = p + 1 - case 4: - // line 137 "scan_tokens.rl" - + case 8: +//line scan_tokens.rl:160 te = p + 1 { token(TokenTemplateInterp) @@ -4394,13 +4500,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 5: - // line 147 "scan_tokens.rl" - + case 9: +//line scan_tokens.rl:170 te = p + 1 { token(TokenTemplateControl) @@ -4413,13 +4518,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 6: - // line 79 "scan_tokens.rl" - + case 10: +//line scan_tokens.rl:84 te = p + 1 { token(TokenCQuote) @@ -4431,23 +4535,20 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To goto _again } - case 7: - // line 221 "scan_tokens.rl" - + case 11: +//line scan_tokens.rl:244 te = p + 1 { token(TokenInvalid) } - case 8: - // line 222 "scan_tokens.rl" - + case 12: +//line scan_tokens.rl:245 te = p + 1 { token(TokenBadUTF8) } - case 9: - // line 137 "scan_tokens.rl" - + case 13: +//line scan_tokens.rl:160 te = p p-- { @@ -4461,13 +4562,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 10: - // line 147 "scan_tokens.rl" - + case 14: +//line scan_tokens.rl:170 te = p p-- { @@ -4481,59 +4581,51 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 11: - // line 220 "scan_tokens.rl" - + case 15: +//line scan_tokens.rl:243 te = p p-- { 
token(TokenQuotedLit) } - case 12: - // line 221 "scan_tokens.rl" - + case 16: +//line scan_tokens.rl:244 te = p p-- { token(TokenInvalid) } - case 13: - // line 222 "scan_tokens.rl" - + case 17: +//line scan_tokens.rl:245 te = p p-- { token(TokenBadUTF8) } - case 14: - // line 220 "scan_tokens.rl" - + case 18: +//line scan_tokens.rl:243 p = (te) - 1 { token(TokenQuotedLit) } - case 15: - // line 222 "scan_tokens.rl" - + case 19: +//line scan_tokens.rl:245 p = (te) - 1 { token(TokenBadUTF8) } - case 16: - // line 125 "scan_tokens.rl" - + case 20: +//line scan_tokens.rl:148 act = 10 - case 17: - // line 230 "scan_tokens.rl" - + case 21: +//line scan_tokens.rl:253 act = 11 - case 18: - // line 137 "scan_tokens.rl" - + case 22: +//line scan_tokens.rl:160 te = p + 1 { token(TokenTemplateInterp) @@ -4546,13 +4638,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 19: - // line 147 "scan_tokens.rl" - + case 23: +//line scan_tokens.rl:170 te = p + 1 { token(TokenTemplateControl) @@ -4565,13 +4656,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 20: - // line 106 "scan_tokens.rl" - + case 24: +//line scan_tokens.rl:111 te = p + 1 { // This action is called specificially when a heredoc literal @@ -4582,7 +4672,25 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To if topdoc.StartOfLine { maybeMarker := bytes.TrimSpace(data[ts:te]) if bytes.Equal(maybeMarker, topdoc.Marker) { + // We actually emit two tokens here: the end-of-heredoc + // marker first, and then separately the newline that + // follows it. This then avoids issues with the closing + // marker consuming a newline that would normally be used + // to mark the end of an attribute definition. + // We might have either a \n sequence or an \r\n sequence + // here, so we must handle both. 
+ nls := te - 1 + nle := te + te-- + if data[te-1] == '\r' { + // back up one more byte + nls-- + te-- + } token(TokenCHeredoc) + ts = nls + te = nle + token(TokenNewline) heredocs = heredocs[:len(heredocs)-1] top-- cs = stack[top] @@ -4597,16 +4705,14 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To topdoc.StartOfLine = true token(TokenStringLit) } - case 21: - // line 230 "scan_tokens.rl" - + case 25: +//line scan_tokens.rl:253 te = p + 1 { token(TokenBadUTF8) } - case 22: - // line 137 "scan_tokens.rl" - + case 26: +//line scan_tokens.rl:160 te = p p-- { @@ -4620,13 +4726,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 23: - // line 147 "scan_tokens.rl" - + case 27: +//line scan_tokens.rl:170 te = p p-- { @@ -4640,13 +4745,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 24: - // line 125 "scan_tokens.rl" - + case 28: +//line scan_tokens.rl:148 te = p p-- { @@ -4656,17 +4760,15 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To heredocs[len(heredocs)-1].StartOfLine = false token(TokenStringLit) } - case 25: - // line 230 "scan_tokens.rl" - + case 29: +//line scan_tokens.rl:253 te = p p-- { token(TokenBadUTF8) } - case 26: - // line 125 "scan_tokens.rl" - + case 30: +//line scan_tokens.rl:148 p = (te) - 1 { // This action is called when a heredoc literal _doesn't_ end @@ -4675,9 +4777,8 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To heredocs[len(heredocs)-1].StartOfLine = false token(TokenStringLit) } - case 27: - // line 1 "NONE" - + case 31: +//line NONE:1 switch act { case 0: { @@ -4701,17 +4802,14 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } - case 28: - // line 133 "scan_tokens.rl" - + case 32: +//line scan_tokens.rl:156 act = 14 - case 29: - // line 237 "scan_tokens.rl" - + case 33: +//line scan_tokens.rl:260 act = 15 - case 30: - // line 137 "scan_tokens.rl" - + case 34: +//line scan_tokens.rl:160 te = p + 1 { token(TokenTemplateInterp) @@ -4724,13 +4822,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 31: - // line 147 "scan_tokens.rl" - + case 35: +//line scan_tokens.rl:170 te = p + 1 { token(TokenTemplateControl) @@ -4743,27 +4840,24 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 32: - // line 133 "scan_tokens.rl" - + case 36: +//line scan_tokens.rl:156 te = p + 1 { token(TokenStringLit) } - case 33: - // line 237 "scan_tokens.rl" - + case 37: +//line scan_tokens.rl:260 te = p + 1 { token(TokenBadUTF8) } - case 34: - // line 137 "scan_tokens.rl" - + case 38: +//line scan_tokens.rl:160 te = p p-- { @@ -4777,13 +4871,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 35: - // line 147 "scan_tokens.rl" - + case 39: +//line scan_tokens.rl:170 te = p p-- { @@ -4797,36 +4890,32 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) 
stack[top] = cs top++ - cs = 1462 + cs = 1464 goto _again } } - case 36: - // line 133 "scan_tokens.rl" - + case 40: +//line scan_tokens.rl:156 te = p p-- { token(TokenStringLit) } - case 37: - // line 237 "scan_tokens.rl" - + case 41: +//line scan_tokens.rl:260 te = p p-- { token(TokenBadUTF8) } - case 38: - // line 133 "scan_tokens.rl" - + case 42: +//line scan_tokens.rl:156 p = (te) - 1 { token(TokenStringLit) } - case 39: - // line 1 "NONE" - + case 43: +//line NONE:1 switch act { case 0: { @@ -4846,61 +4935,52 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } - case 40: - // line 241 "scan_tokens.rl" - + case 44: +//line scan_tokens.rl:264 act = 16 - case 41: - // line 242 "scan_tokens.rl" - + case 45: +//line scan_tokens.rl:265 act = 17 - case 42: - // line 242 "scan_tokens.rl" - + case 46: +//line scan_tokens.rl:265 te = p + 1 { token(TokenBadUTF8) } - case 43: - // line 243 "scan_tokens.rl" - + case 47: +//line scan_tokens.rl:266 te = p + 1 { token(TokenInvalid) } - case 44: - // line 241 "scan_tokens.rl" - + case 48: +//line scan_tokens.rl:264 te = p p-- { token(TokenIdent) } - case 45: - // line 242 "scan_tokens.rl" - + case 49: +//line scan_tokens.rl:265 te = p p-- { token(TokenBadUTF8) } - case 46: - // line 241 "scan_tokens.rl" - + case 50: +//line scan_tokens.rl:264 p = (te) - 1 { token(TokenIdent) } - case 47: - // line 242 "scan_tokens.rl" - + case 51: +//line scan_tokens.rl:265 p = (te) - 1 { token(TokenBadUTF8) } - case 48: - // line 1 "NONE" - + case 52: +//line NONE:1 switch act { case 16: { @@ -4914,114 +4994,93 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } - case 49: - // line 249 "scan_tokens.rl" - + case 53: +//line scan_tokens.rl:272 act = 21 - case 50: - // line 251 "scan_tokens.rl" - - act = 22 - case 51: - // line 262 "scan_tokens.rl" - + case 54: +//line scan_tokens.rl:285 act = 32 - case 52: - // line 272 "scan_tokens.rl" - + case 55: +//line scan_tokens.rl:295 act = 38 - case 53: - // line 273 "scan_tokens.rl" - + case 56: +//line scan_tokens.rl:296 act = 39 - case 54: - // line 251 "scan_tokens.rl" - + case 57: +//line scan_tokens.rl:274 te = p + 1 { token(TokenComment) } - case 55: - // line 252 "scan_tokens.rl" - + case 58: +//line scan_tokens.rl:275 te = p + 1 { token(TokenNewline) } - case 56: - // line 254 "scan_tokens.rl" - + case 59: +//line scan_tokens.rl:277 te = p + 1 { token(TokenEqualOp) } - case 57: - // line 255 "scan_tokens.rl" - + case 60: +//line scan_tokens.rl:278 te = p + 1 { token(TokenNotEqual) } - case 58: - // line 256 "scan_tokens.rl" - + case 61: +//line scan_tokens.rl:279 te = p + 1 { token(TokenGreaterThanEq) } - case 59: - // line 257 "scan_tokens.rl" - + case 62: +//line scan_tokens.rl:280 te = p + 1 { token(TokenLessThanEq) } - case 60: - // line 258 "scan_tokens.rl" - + case 63: +//line scan_tokens.rl:281 te = p + 1 { token(TokenAnd) } - case 61: - // line 259 "scan_tokens.rl" - + case 64: +//line scan_tokens.rl:282 te = p + 1 { token(TokenOr) } - case 62: - // line 260 "scan_tokens.rl" - + case 65: +//line scan_tokens.rl:283 te = p + 1 { token(TokenEllipsis) } - case 63: - // line 261 "scan_tokens.rl" - + case 66: +//line scan_tokens.rl:284 te = p + 1 { token(TokenFatArrow) } - case 64: - // line 262 "scan_tokens.rl" - + case 67: +//line scan_tokens.rl:285 te = p + 1 { selfToken() } - case 65: - // line 157 "scan_tokens.rl" - + case 68: +//line scan_tokens.rl:180 te = p + 1 { token(TokenOBrace) braces++ } - case 66: - // line 162 "scan_tokens.rl" - 
+ case 69: +//line scan_tokens.rl:185 te = p + 1 { if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { @@ -5040,9 +5099,8 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To braces-- } } - case 67: - // line 174 "scan_tokens.rl" - + case 70: +//line scan_tokens.rl:197 te = p + 1 { // Only consume from the retBraces stack and return if we are at @@ -5070,9 +5128,8 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To braces-- } } - case 68: - // line 74 "scan_tokens.rl" - + case 71: +//line scan_tokens.rl:79 te = p + 1 { token(TokenOQuote) @@ -5080,13 +5137,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1513 + cs = 1514 goto _again } } - case 69: - // line 84 "scan_tokens.rl" - + case 72: +//line scan_tokens.rl:89 te = p + 1 { token(TokenOHeredoc) @@ -5111,120 +5167,94 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To stack = append(stack, 0) stack[top] = cs top++ - cs = 1527 + cs = 1540 goto _again } } - case 70: - // line 272 "scan_tokens.rl" - + case 73: +//line scan_tokens.rl:295 te = p + 1 { token(TokenBadUTF8) } - case 71: - // line 273 "scan_tokens.rl" - + case 74: +//line scan_tokens.rl:296 te = p + 1 { token(TokenInvalid) } - case 72: - // line 247 "scan_tokens.rl" - + case 75: +//line scan_tokens.rl:270 te = p p-- - case 73: - // line 248 "scan_tokens.rl" - + case 76: +//line scan_tokens.rl:271 te = p p-- { token(TokenNumberLit) } - case 74: - // line 249 "scan_tokens.rl" - + case 77: +//line scan_tokens.rl:272 te = p p-- { token(TokenIdent) } - case 75: - // line 251 "scan_tokens.rl" - - te = p - p-- - { - token(TokenComment) - } - case 76: - // line 262 "scan_tokens.rl" - + case 78: +//line scan_tokens.rl:285 te = p p-- { selfToken() } - case 77: - // line 272 "scan_tokens.rl" - + case 79: +//line scan_tokens.rl:295 te = p p-- { token(TokenBadUTF8) } - case 78: - // line 273 "scan_tokens.rl" - + case 80: +//line scan_tokens.rl:296 te = p p-- { token(TokenInvalid) } - case 79: - // line 248 "scan_tokens.rl" - + case 81: +//line scan_tokens.rl:271 p = (te) - 1 { token(TokenNumberLit) } - case 80: - // line 249 "scan_tokens.rl" - + case 82: +//line scan_tokens.rl:272 p = (te) - 1 { token(TokenIdent) } - case 81: - // line 262 "scan_tokens.rl" - + case 83: +//line scan_tokens.rl:285 p = (te) - 1 { selfToken() } - case 82: - // line 272 "scan_tokens.rl" - + case 84: +//line scan_tokens.rl:295 p = (te) - 1 { token(TokenBadUTF8) } - case 83: - // line 1 "NONE" - + case 85: +//line NONE:1 switch act { case 21: { p = (te) - 1 token(TokenIdent) } - case 22: - { - p = (te) - 1 - token(TokenComment) - } case 32: { p = (te) - 1 @@ -5242,7 +5272,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } - // line 5104 "scan_tokens.go" +//line scan_tokens.go:5138 } } @@ -5253,17 +5283,15 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To for ; _nacts > 0; _nacts-- { _acts++ switch _hcltok_actions[_acts-1] { - case 0: - // line 1 "NONE" - + case 4: +//line NONE:1 ts = 0 - case 1: - // line 1 "NONE" - + case 5: +//line NONE:1 act = 0 - // line 5124 "scan_tokens.go" +//line scan_tokens.go:5156 } } @@ -5289,13 +5317,23 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To } } - // line 334 "scan_tokens.rl" +//line scan_tokens.rl:357 // If we fall out here without being in a final state then we've // 
encountered something that the scanner can't match, which we'll // deal with as an invalid. if cs < hcltok_first_final { - f.emitToken(TokenInvalid, p, len(data)) + if mode == scanTemplate && len(stack) == 0 { + // If we're scanning a bare template then any straggling + // top-level stuff is actually literal string, rather than + // invalid. This handles the case where the template ends + // with a single "$" or "%", which trips us up because we + // want to see another character to decide if it's a sequence + // or an escape. + f.emitToken(TokenStringLit, ts, len(data)) + } else { + f.emitToken(TokenInvalid, ts, len(data)) + } } // We always emit a synthetic EOF token at the end, since it gives the diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl index dc3f56b9b4..1f37b88baa 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl @@ -9,17 +9,22 @@ import ( // This file is generated from scan_tokens.rl. DO NOT EDIT. %%{ - # (except you are actually in scan_tokens.rl here, so edit away!) + # (except when you are actually in scan_tokens.rl here, so edit away!) machine hcltok; write data; }%% func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + stripData := stripUTF8BOM(data) + start.Byte += len(data) - len(stripData) + data = stripData + f := &tokenAccum{ - Filename: filename, - Bytes: data, - Pos: start, + Filename: filename, + Bytes: data, + Pos: start, + StartByte: start.Byte, } %%{ @@ -36,10 +41,10 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To NumberLitContinue = (digit|'.'|('e'|'E') ('+'|'-')? digit); NumberLit = digit ("" | (NumberLitContinue - '.') | (NumberLitContinue* (NumberLitContinue - '.'))); - Ident = ID_Start (ID_Continue | '-')*; + Ident = (ID_Start | '_') (ID_Continue | '-')*; # Symbols that just represent themselves are handled as a single rule. - SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`"; + SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`" | "'"; EqualOp = "=="; NotEqual = "!="; @@ -60,7 +65,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To Comment = ( ("#" (any - EndOfLine)* EndOfLine) | ("//" (any - EndOfLine)* EndOfLine) | - ("/*" any* "*/") + ("/*" any* :>> "*/") ); # Note: hclwrite assumes that only ASCII spaces appear between tokens, @@ -112,7 +117,25 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To if topdoc.StartOfLine { maybeMarker := bytes.TrimSpace(data[ts:te]) if bytes.Equal(maybeMarker, topdoc.Marker) { + // We actually emit two tokens here: the end-of-heredoc + // marker first, and then separately the newline that + // follows it. This then avoids issues with the closing + // marker consuming a newline that would normally be used + // to mark the end of an attribute definition. + // We might have either a \n sequence or an \r\n sequence + // here, so we must handle both. 
+ nls := te-1 + nle := te + te-- + if data[te-1] == '\r' { + // back up one more byte + nls-- + te-- + } token(TokenCHeredoc); + ts = nls + te = nle + token(TokenNewline); heredocs = heredocs[:len(heredocs)-1] fret; } @@ -197,14 +220,14 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To EndStringTmpl = '"'; StringLiteralChars = (AnyUTF8 - ("\r"|"\n")); TemplateStringLiteral = ( - ('$' ^'{') | - ('%' ^'{') | + ('$' ^'{' %{ fhold; }) | + ('%' ^'{' %{ fhold; }) | ('\\' StringLiteralChars) | (StringLiteralChars - ("$" | '%' | '"')) )+; HeredocStringLiteral = ( - ('$' ^'{') | - ('%' ^'{') | + ('$' ^'{' %{ fhold; }) | + ('%' ^'{' %{ fhold; }) | (StringLiteralChars - ("$" | '%')) )*; BareStringLiteral = ( @@ -337,7 +360,17 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To // encountered something that the scanner can't match, which we'll // deal with as an invalid. if cs < hcltok_first_final { - f.emitToken(TokenInvalid, p, len(data)) + if mode == scanTemplate && len(stack) == 0 { + // If we're scanning a bare template then any straggling + // top-level stuff is actually literal string, rather than + // invalid. This handles the case where the template ends + // with a single "$" or "%", which trips us up because we + // want to see another character to decide if it's a sequence + // or an escape. + f.emitToken(TokenStringLit, ts, len(data)) + } else { + f.emitToken(TokenInvalid, ts, len(data)) + } } // We always emit a synthetic EOF token at the end, since it gives the diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md index faf279170d..091c1c23c6 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md @@ -4,18 +4,18 @@ This is the specification of the syntax and semantics of the native syntax for HCL. HCL is a system for defining configuration languages for applications. The HCL information model is designed to support multiple concrete syntaxes for configuration, but this native syntax is considered the primary format -and is optimized for human authoring and maintenence, as opposed to machine +and is optimized for human authoring and maintenance, as opposed to machine generation of configuration. The language consists of three integrated sub-languages: -* The _structural_ language defines the overall heirarchical configuration +- The _structural_ language defines the overall hierarchical configuration structure, and is a serialization of HCL bodies, blocks and attributes. -* The _expression_ language is used to express attribute values, either as +- The _expression_ language is used to express attribute values, either as literals or as derivations of other values. -* The _template_ language is used to compose values together into strings, +- The _template_ language is used to compose values together into strings, as one of several types of expression in the expression language. In normal use these three sub-languages are used together within configuration @@ -30,19 +30,19 @@ Within this specification a semi-formal notation is used to illustrate the details of syntax. This notation is intended for human consumption rather than machine consumption, with the following conventions: -* A naked name starting with an uppercase letter is a global production, +- A naked name starting with an uppercase letter is a global production, common to all of the syntax specifications in this document. 
-* A naked name starting with a lowercase letter is a local production,
+- A naked name starting with a lowercase letter is a local production,
  meaningful only within the specification where it is defined.

-* Double and single quotes (`"` and `'`) are used to mark literal character
+- Double and single quotes (`"` and `'`) are used to mark literal character
  sequences, which may be either punctuation markers or keywords.

-* The default operator for combining items, which has no punctuation,
+- The default operator for combining items, which has no punctuation,
  is concatenation.

-* The symbol `|` indicates that any one of its left and right operands may
+- The symbol `|` indicates that any one of its left and right operands may
  be present.

-* The `*` symbol indicates zero or more repetitions of the item to its left.
-* The `?` symbol indicates zero or one of the item to its left.
-* Parentheses (`(` and `)`) are used to group items together to apply
+- The `*` symbol indicates zero or more repetitions of the item to its left.
+- The `?` symbol indicates zero or one of the item to its left.
+- Parentheses (`(` and `)`) are used to group items together to apply
  the `|`, `*` and `?` operators to them collectively.

The grammar notation does not fully describe the language. The prose may
@@ -77,11 +77,11 @@ are not valid within HCL native syntax.

Comments serve as program documentation and come in two forms:

-* _Line comments_ start with either the `//` or `#` sequences and end with
+- _Line comments_ start with either the `//` or `#` sequences and end with
  the next newline sequence. A line comment is considered equivalent to a
  newline sequence.

-* _Inline comments_ start with the `/*` sequence and end with the `*/`
+- _Inline comments_ start with the `/*` sequence and end with the `*/`
  sequence, and may have any characters within except the ending sequence.
  An inline comment is considered equivalent to a whitespace sequence.

@@ -91,7 +91,7 @@ template literals except inside an interpolation sequence or template
directive.

### Identifiers

Identifiers name entities such as blocks, attributes and expression variables.
-Identifiers are interpreted as per [UAX #31][UAX31] Section 2. Specifically,
+Identifiers are interpreted as per [UAX #31][uax31] Section 2. Specifically,
their syntax is defined in terms of the `ID_Start` and `ID_Continue`
character properties as follows:

@@ -109,7 +109,7 @@ that is not part of the unicode `ID_Continue` definition. This is to allow
attribute names and block type names to contain dashes, although underscores
as word separators are considered the idiomatic usage.

-[UAX31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
+[uax31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"

### Keywords

@@ -150,18 +150,19 @@ The structural language consists of syntax representing the following
constructs:

-* _Attributes_, which assign a value to a specified name.
-* _Blocks_, which create a child body annotated by a type and optional labels.
-* _Body Content_, which consists of a collection of attributes and blocks.
+- _Attributes_, which assign a value to a specified name.
+- _Blocks_, which create a child body annotated by a type and optional labels.
+- _Body Content_, which consists of a collection of attributes and blocks.

These constructs correspond to the similarly-named concepts in the
language-agnostic HCL information model.

```ebnf
-ConfigFile = Body;
-Body = (Attribute | Block)*;
-Attribute = Identifier "=" Expression Newline;
-Block = Identifier (StringLit)* "{" Newline Body "}" Newline;
+ConfigFile   = Body;
+Body         = (Attribute | Block | OneLineBlock)*;
+Attribute    = Identifier "=" Expression Newline;
+Block        = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
+OneLineBlock = Identifier (StringLit|Identifier)* "{" (Identifier "=" Expression)? "}" Newline;
```

### Configuration Files
@@ -186,8 +187,10 @@ for later evaluation by the calling application.

### Blocks

A _block_ creates a child body that is annotated with a block _type_ and
-zero or more optional block _labels_. Blocks create a structural heirachy
-which can be interpreted by the calling application.
+zero or more block _labels_. Blocks create a structural hierarchy which can be
+interpreted by the calling application.
+
+Block labels can either be quoted literal strings or naked identifiers.

## Expressions

@@ -250,9 +253,9 @@ LiteralValue = (
);
```

-* Numeric literals represent values of type _number_.
-* The `true` and `false` keywords represent values of type _bool_.
-* The `null` keyword represents a null value of the dynamic pseudo-type.
+- Numeric literals represent values of type _number_.
+- The `true` and `false` keywords represent values of type _bool_.
+- The `null` keyword represents a null value of the dynamic pseudo-type.

String literals are not directly available in the expression sub-language, but
are available via the template sub-language, which can in turn be incorporated
@@ -283,8 +286,8 @@ When specifying an object element, an identifier is interpreted as a literal
attribute name as opposed to a variable reference. To populate an item key
from a variable, use parentheses to disambiguate:

-* `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
-* `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
+- `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
+- `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
  from the variable named `foo`.

Between the open and closing delimiters of these sequences, newline sequences
@@ -294,14 +297,14 @@ There is a syntax ambiguity between _for expressions_ and collection
values whose first element is a reference to a variable named `for`. The
_for expression_ interpretation has priority, so to produce a tuple whose
first element is the value of a variable named `for`, or an object with a
-key named `for`, use paretheses to disambiguate:
+key named `for`, use parentheses to disambiguate:

-* `[for, foo, baz]` is a syntax error.
-* `[(for), foo, baz]` is a tuple whose first element is the value of variable
+- `[for, foo, baz]` is a syntax error.
+- `[(for), foo, baz]` is a tuple whose first element is the value of variable
  `for`.
-* `{for: 1, baz: 2}` is a syntax error.
-* `{(for): 1, baz: 2}` is an object with an attribute literally named `for`.
-* `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the
+- `{for: 1, baz: 2}` is a syntax error.
+- `{(for): 1, baz: 2}` is an object with an attribute literally named `for`.
+- `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the
  ambiguity by reordering.

### Template Expressions

@@ -309,9 +312,9 @@ A _template expression_ embeds a program written in the template sub-language
as an expression.
Template expressions come in two forms:

-* A _quoted_ template expression is delimited by quote characters (`"`) and
+- A _quoted_ template expression is delimited by quote characters (`"`) and
  defines a template as a single-line expression with escape characters.
-* A _heredoc_ template expression is introduced by a `<<` sequence and
+- A _heredoc_ template expression is introduced by a `<<` sequence and
  defines a template via a multi-line sequence terminated by a user-chosen
  delimiter.

@@ -319,7 +322,7 @@ In both cases the template interpolation and directive syntax is available for
use within the delimiters, and any text outside of these special sequences is
interpreted as a literal string.

-In _quoted_ template expressions any literal string sequences within the
+In _quoted_ template expressions any literal string sequences within the
template behave in a special way: literal newline sequences are not permitted
and instead _escape sequences_ can be included, starting with the
backslash `\`:

@@ -455,14 +458,14 @@ are provided, the first is the key and the second is the value.

Tuple, object, list, map, and set types are iterable. The type of collection
used defines how the key and value variables are populated:

-* For tuple and list types, the _key_ is the zero-based index into the
+- For tuple and list types, the _key_ is the zero-based index into the
  sequence for each element, and the _value_ is the element value. The
  elements are visited in index order.
-* For object and map types, the _key_ is the string attribute name or element
+- For object and map types, the _key_ is the string attribute name or element
  key, and the _value_ is the attribute or element value. The elements are
  visited in the order defined by a lexicographic sort of the attribute names
  or keys.
-* For set types, the _key_ and _value_ are both the element value. The elements
+- For set types, the _key_ and _value_ are both the element value. The elements
  are visited in an undefined but consistent order.

The expression after the colon and (in the case of object `for`) the expression
@@ -480,16 +483,16 @@ object.

In the case of object `for`, it is an error if two input elements produce
the same result from the attribute name expression, since duplicate
attributes are not possible. If the ellipsis symbol (`...`) appears
-immediately after the value experssion, this activates the grouping mode in
+immediately after the value expression, this activates the grouping mode in
which each value in the resulting object is a _tuple_ of all of the values
that were produced against each distinct key.

-* `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
-* `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
-* `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
-* `{for i, v in ["a", "a", "b"]: k => v}` produces an error, because attribute
+- `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
+- `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
+- `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
+- `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute
  `a` is defined twice.
-* `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
+- `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.

If the `if` keyword is used after the element expression(s), it applies an
additional predicate that can be used to conditionally filter elements from
@@ -499,7 +502,7 @@ element expression(s).
It must evaluate to a boolean value; if `true`, the element will be evaluated as normal, while if `false` the element will be skipped. -* `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`. +- `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`. If the collection value, element expression(s) or condition expression return unknown values that are otherwise type-valid, the result is a value of the @@ -564,10 +567,10 @@ elements in a tuple, list, or set value. There are two kinds of "splat" operator: -* The _attribute-only_ splat operator supports only attribute lookups into +- The _attribute-only_ splat operator supports only attribute lookups into the elements from a list, but supports an arbitrary number of them. -* The _full_ splat operator additionally supports indexing into the elements +- The _full_ splat operator additionally supports indexing into the elements from a list, and allows any combination of attribute access and index operations. @@ -580,9 +583,9 @@ fullSplat = "[" "*" "]" (GetAttr | Index)*; The splat operators can be thought of as shorthands for common operations that could otherwise be performed using _for expressions_: -* `tuple.*.foo.bar[0]` is approximately equivalent to +- `tuple.*.foo.bar[0]` is approximately equivalent to `[for v in tuple: v.foo.bar][0]`. -* `tuple[*].foo.bar[0]` is approximately equivalent to +- `tuple[*].foo.bar[0]` is approximately equivalent to `[for v in tuple: v.foo.bar[0]]` Note the difference in how the trailing index operator is interpreted in @@ -594,13 +597,15 @@ _for expressions_ shown above: if a splat operator is applied to a value that is _not_ of tuple, list, or set type, the value is coerced automatically into a single-value list of the value type: -* `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object` +- `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object` is a single object. -* `any_number.*` is equivalent to `[any_number]`, assuming that `any_number` +- `any_number.*` is equivalent to `[any_number]`, assuming that `any_number` is a single number. -If the left operand of a splat operator is an unknown value of any type, the -result is a value of the dynamic pseudo-type. +If applied to a null value that is not tuple, list, or set, the result is always +an empty tuple, which allows conveniently converting a possibly-null scalar +value into a tuple of zero or one elements. It is illegal to apply a splat +operator to a null value of tuple, list, or set type. ### Operations @@ -681,7 +686,7 @@ Arithmetic operations are considered to be performed in an arbitrary-precision number space. If either operand of an arithmetic operator is an unknown number or a value -of the dynamic pseudo-type, the result is an unknown number. +of the dynamic pseudo-type, the result is an unknown number. ### Logic Operators @@ -706,7 +711,7 @@ the outcome of a boolean expression. Conditional = Expression "?" Expression ":" Expression; ``` -The first expression is the _predicate_, which is evaluated and must produce +The first expression is the _predicate_, which is evaluated and must produce a boolean result. If the predicate value is `true`, the result of the second expression is the result of the conditional. If the predicate value is `false`, the result of the third expression is the result of the conditional. @@ -767,15 +772,15 @@ sequence is escaped as `%%{`. 
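To make the effect of escaping concrete, the following sketch parses a
standalone template via the Go `hclsyntax` API; the template text and the
file name are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// "$${price}" is the escaped form of an interpolation introducer,
	// so no variable named "price" is looked up during evaluation.
	tmpl, diags := hclsyntax.ParseTemplate(
		[]byte("cost is $${price}"), "pricing.tmpl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, _ := tmpl.Value(nil)
	fmt.Println(val.AsString()) // cost is ${price}
}
```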
When the template sub-language is embedded in the expression language via _template expressions_, additional constraints and transforms are applied to -template literalsas described in the definition of template expressions. +template literals as described in the definition of template expressions. The value of a template literal can be modified by _strip markers_ in any interpolations or directives that are adjacent to it. A strip marker is a tilde (`~`) placed immediately after the opening `{` or before the closing `}` of a template sequence: -* `hello ${~ "world" }` produces `"helloworld"`. -* `%{ if true ~} hello %{~ endif }` produces `"hello"`. +- `hello ${~ "world" }` produces `"helloworld"`. +- `%{ if true ~} hello %{~ endif }` produces `"hello"`. When a strip marker is present, any spaces adjacent to it in the corresponding string literal (if any) are removed before producing the final value. Space @@ -784,7 +789,7 @@ characters are interpreted as per Unicode's definition. Stripping is done at syntax level rather than value level. Values returned by interpolations or directives are not subject to stripping: -* `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`, +- `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`, because the space is not in a template literal directly adjacent to the strip marker. @@ -822,9 +827,9 @@ TemplateIf = ( The evaluation of the `if` directive is equivalent to the conditional expression, with the following exceptions: -* The two sub-templates always produce strings, and thus the result value is +- The two sub-templates always produce strings, and thus the result value is also always a string. -* The `else` clause may be omitted, in which case the conditional's third +- The `else` clause may be omitted, in which case the conditional's third expression result is implied to be the empty string. ### Template For Directive @@ -844,9 +849,9 @@ TemplateFor = ( The evaluation of the `for` directive is equivalent to the _for expression_ when producing a tuple, with the following exceptions: -* The sub-template always produces a string. -* There is no equivalent of the "if" clause on the for expression. -* The elements of the resulting tuple are all converted to strings and +- The sub-template always produces a string. +- There is no equivalent of the "if" clause on the for expression. +- The elements of the resulting tuple are all converted to strings and concatenated to produce a flat string result. ### Template Interpolation Unwrapping @@ -862,13 +867,13 @@ template or expression syntax. Unwrapping allows arbitrary expressions to be used to populate attributes when strings in such languages are interpreted as templates. -* `${true}` produces the boolean value `true` -* `${"${true}"}` produces the boolean value `true`, because both the inner +- `${true}` produces the boolean value `true` +- `${"${true}"}` produces the boolean value `true`, because both the inner and outer interpolations are subject to unwrapping. -* `hello ${true}` produces the string `"hello true"` -* `${""}${true}` produces the string `"true"` because there are two +- `hello ${true}` produces the string `"hello true"` +- `${""}${true}` produces the string `"true"` because there are two interpolation sequences, even though one produces an empty result. 
-* `%{ for v in [true] }${v}%{ endif }` produces the string `true` because +- `%{ for v in [true] }${v}%{ endif }` produces the string `true` because the presence of the `for` directive circumvents the unwrapping even though the final result is a single value. @@ -877,3 +882,45 @@ application, by converting the final template result to string. This is necessary, for example, if a standalone template is being used to produce the direct contents of a file, since the result in that case must always be a string. + +## Static Analysis + +The HCL static analysis operations are implemented for some expression types +in the native syntax, as described in the following sections. + +A goal for static analysis of the native syntax is for the interpretation to +be as consistent as possible with the dynamic evaluation interpretation of +the given expression, though some deviations are intentionally made in order +to maximize the potential for analysis. + +### Static List + +The tuple construction syntax can be interpreted as a static list. All of +the expression elements given are returned as the static list elements, +with no further interpretation. + +### Static Map + +The object construction syntax can be interpreted as a static map. All of the +key/value pairs given are returned as the static pairs, with no further +interpretation. + +The usual requirement that an attribute name be interpretable as a string +does not apply to this static analysis, allowing callers to provide map-like +constructs with different key types by building on the map syntax. + +### Static Call + +The function call syntax can be interpreted as a static call. The called +function name is returned verbatim and the given argument expressions are +returned as the static arguments, with no further interpretation. + +### Static Traversal + +A variable expression and any attached attribute access operations and +constant index operations can be interpreted as a static traversal. + +The keywords `true`, `false` and `null` can also be interpreted as +static traversals, behaving as if they were references to variables of those +names, to allow callers to redefine the meaning of those keywords in certain +contexts. diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go index d69f65b62e..476025d1ba 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go @@ -9,6 +9,10 @@ import ( // AsHCLBlock returns the block data expressed as a *hcl.Block. 
func (b *Block) AsHCLBlock() *hcl.Block { + if b == nil { + return nil + } + lastHeaderRange := b.TypeRange if len(b.LabelRanges) > 0 { lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1] @@ -43,8 +47,8 @@ type Body struct { var assertBodyImplBody hcl.Body = &Body{} func (b *Body) walkChildNodes(w internalWalkFunc) { - b.Attributes = w(b.Attributes).(Attributes) - b.Blocks = w(b.Blocks).(Blocks) + w(b.Attributes) + w(b.Blocks) } func (b *Body) Range() hcl.Range { @@ -82,8 +86,8 @@ func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostic diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: "Unsupported attribute", - Detail: fmt.Sprintf("An attribute named %q is not expected here.%s", name, suggestion), + Summary: "Unsupported argument", + Detail: fmt.Sprintf("An argument named %q is not expected here.%s", name, suggestion), Subject: &attr.NameRange, }) } @@ -103,7 +107,7 @@ func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostic // Is there an attribute of the same name? for _, attrS := range schema.Attributes { if attrS.Name == blockTy { - suggestion = fmt.Sprintf(" Did you mean to define attribute %q?", blockTy) + suggestion = fmt.Sprintf(" Did you mean to define argument %q? If so, use the equals sign to assign it a value.", blockTy) break } } @@ -147,8 +151,8 @@ func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Bod if attrS.Required { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: "Missing required attribute", - Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Summary: "Missing required argument", + Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), Subject: b.MissingItemRange().Ptr(), }) } @@ -251,9 +255,9 @@ func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { example := b.Blocks[0] diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: fmt.Sprintf("Unexpected %s block", example.Type), + Summary: fmt.Sprintf("Unexpected %q block", example.Type), Detail: "Blocks are not allowed here.", - Context: &example.TypeRange, + Subject: &example.TypeRange, }) // we will continue processing anyway, and return the attributes // we are able to find so that certain analyses can still be done @@ -275,15 +279,19 @@ func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { } func (b *Body) MissingItemRange() hcl.Range { - return b.EndRange + return hcl.Range{ + Filename: b.SrcRange.Filename, + Start: b.SrcRange.Start, + End: b.SrcRange.Start, + } } // Attributes is the collection of attribute definitions within a body. type Attributes map[string]*Attribute func (a Attributes) walkChildNodes(w internalWalkFunc) { - for k, attr := range a { - a[k] = w(attr).(*Attribute) + for _, attr := range a { + w(attr) } } @@ -317,7 +325,7 @@ type Attribute struct { } func (a *Attribute) walkChildNodes(w internalWalkFunc) { - a.Expr = w(a.Expr).(Expression) + w(a.Expr) } func (a *Attribute) Range() hcl.Range { @@ -326,6 +334,9 @@ func (a *Attribute) Range() hcl.Range { // AsHCLAttribute returns the block data expressed as a *hcl.Attribute. 
func (a *Attribute) AsHCLAttribute() *hcl.Attribute { + if a == nil { + return nil + } return &hcl.Attribute{ Name: a.Name, Expr: a.Expr, @@ -339,8 +350,8 @@ func (a *Attribute) AsHCLAttribute() *hcl.Attribute { type Blocks []*Block func (bs Blocks) walkChildNodes(w internalWalkFunc) { - for i, block := range bs { - bs[i] = w(block).(*Block) + for _, block := range bs { + w(block) } } @@ -371,9 +382,13 @@ type Block struct { } func (b *Block) walkChildNodes(w internalWalkFunc) { - b.Body = w(b.Body).(*Body) + w(b.Body) } func (b *Block) Range() hcl.Range { return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange) } + +func (b *Block) DefRange() hcl.Range { + return hcl.RangeBetween(b.TypeRange, b.OpenBraceRange) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go new file mode 100644 index 0000000000..d8f023ba05 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go @@ -0,0 +1,118 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// ----------------------------------------------------------------------------- +// The methods in this file are all optional extension methods that serve to +// implement the methods of the same name on *hcl.File when its root body +// is provided by this package. +// ----------------------------------------------------------------------------- + +// BlocksAtPos implements the method of the same name for an *hcl.File that +// is backed by a *Body. +func (b *Body) BlocksAtPos(pos hcl.Pos) []*hcl.Block { + list, _ := b.blocksAtPos(pos, true) + return list +} + +// InnermostBlockAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) InnermostBlockAtPos(pos hcl.Pos) *hcl.Block { + _, innermost := b.blocksAtPos(pos, false) + return innermost.AsHCLBlock() +} + +// OutermostBlockAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) OutermostBlockAtPos(pos hcl.Pos) *hcl.Block { + return b.outermostBlockAtPos(pos).AsHCLBlock() +} + +// blocksAtPos is the internal engine of both BlocksAtPos and +// InnermostBlockAtPos, which both need to do the same logic but return a +// differently-shaped result. +// +// list is nil if makeList is false, avoiding an allocation. Innermost is +// always set, and if the returned list is non-nil it will always match the +// final element from that list. +func (b *Body) blocksAtPos(pos hcl.Pos, makeList bool) (list []*hcl.Block, innermost *Block) { + current := b + +Blocks: + for current != nil { + for _, block := range current.Blocks { + wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange) + if wholeRange.ContainsPos(pos) { + innermost = block + if makeList { + list = append(list, innermost.AsHCLBlock()) + } + current = block.Body + continue Blocks + } + } + + // If we fall out here then none of the current body's nested blocks + // contain the position we are looking for, and so we're done. + break + } + + return +} + +// outermostBlockAtPos is the internal version of OutermostBlockAtPos that +// returns a hclsyntax.Block rather than an hcl.Block, allowing for further +// analysis if necessary. +func (b *Body) outermostBlockAtPos(pos hcl.Pos) *Block { + // This is similar to blocksAtPos, but simpler because we know it only + // ever needs to search the first level of nested blocks. 
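+	// The containment test below uses the block's whole range, from its
+	// type keyword through its closing brace, so a position in the block
+	// header counts as being inside the block just as a position in its
+	// body does.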
+ + for _, block := range b.Blocks { + wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange) + if wholeRange.ContainsPos(pos) { + return block + } + } + + return nil +} + +// AttributeAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) AttributeAtPos(pos hcl.Pos) *hcl.Attribute { + return b.attributeAtPos(pos).AsHCLAttribute() +} + +// attributeAtPos is the internal version of AttributeAtPos that returns a +// hclsyntax.Block rather than an hcl.Block, allowing for further analysis if +// necessary. +func (b *Body) attributeAtPos(pos hcl.Pos) *Attribute { + searchBody := b + _, block := b.blocksAtPos(pos, false) + if block != nil { + searchBody = block.Body + } + + for _, attr := range searchBody.Attributes { + if attr.SrcRange.ContainsPos(pos) { + return attr + } + } + + return nil +} + +// OutermostExprAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) OutermostExprAtPos(pos hcl.Pos) hcl.Expression { + attr := b.attributeAtPos(pos) + if attr == nil { + return nil + } + if !attr.Expr.Range().ContainsPos(pos) { + return nil + } + return attr.Expr +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go index 53e847d136..67be099c18 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go @@ -1,6 +1,7 @@ package hclsyntax import ( + "bytes" "fmt" "github.com/apparentlymart/go-textseg/textseg" @@ -89,6 +90,7 @@ const ( TokenBitwiseNot TokenType = '~' TokenBitwiseXor TokenType = '^' TokenStarStar TokenType = '➚' + TokenApostrophe TokenType = '\'' TokenBacktick TokenType = '`' TokenSemicolon TokenType = ';' TokenTabs TokenType = '␉' @@ -114,10 +116,11 @@ const ( ) type tokenAccum struct { - Filename string - Bytes []byte - Pos hcl.Pos - Tokens []Token + Filename string + Bytes []byte + Pos hcl.Pos + Tokens []Token + StartByte int } func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) { @@ -125,15 +128,15 @@ func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) { // the start pos to get our end pos. start := f.Pos - start.Column += startOfs - f.Pos.Byte // Safe because only ASCII spaces can be in the offset - start.Byte = startOfs + start.Column += startOfs + f.StartByte - f.Pos.Byte // Safe because only ASCII spaces can be in the offset + start.Byte = startOfs + f.StartByte end := start - end.Byte = endOfs + end.Byte = endOfs + f.StartByte b := f.Bytes[startOfs:endOfs] for len(b) > 0 { advance, seq, _ := textseg.ScanGraphemeClusters(b, true) - if len(seq) == 1 && seq[0] == '\n' { + if (len(seq) == 1 && seq[0] == '\n') || (len(seq) == 2 && seq[0] == '\r' && seq[1] == '\n') { end.Line++ end.Column = 1 } else { @@ -160,6 +163,13 @@ type heredocInProgress struct { StartOfLine bool } +func tokenOpensFlushHeredoc(tok Token) bool { + if tok.Type != TokenOHeredoc { + return false + } + return bytes.HasPrefix(tok.Bytes, []byte{'<', '<', '-'}) +} + // checkInvalidTokens does a simple pass across the given tokens and generates // diagnostics for tokens that should _never_ appear in HCL source. 
This // is intended to avoid the need for the parser to have special support @@ -174,11 +184,15 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { toldBitwise := 0 toldExponent := 0 toldBacktick := 0 + toldApostrophe := 0 toldSemicolon := 0 toldTabs := 0 toldBadUTF8 := 0 for _, tok := range tokens { + // copy token so it's safe to point to it + tok := tok + switch tok.Type { case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot: if toldBitwise < 4 { @@ -214,22 +228,36 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { case TokenBacktick: // Only report for alternating (even) backticks, so we won't report both start and ends of the same // backtick-quoted string. - if toldExponent < 4 && (toldExponent%2) == 0 { + if (toldBacktick % 2) == 0 { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid character", Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"< 0 { + ret := make(Traversal, len(traversal)) + copy(ret, traversal) root := traversal[0].(TraverseRoot) - traversal[0] = TraverseAttr{ + ret[0] = TraverseAttr{ Name: root.Name, SrcRange: root.SrcRange, } + return ret, diags } return traversal, diags } diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/public.go b/vendor/github.com/hashicorp/hcl2/hcldec/public.go index 5d1f10a3db..3c803632d9 100644 --- a/vendor/github.com/hashicorp/hcl2/hcldec/public.go +++ b/vendor/github.com/hashicorp/hcl2/hcldec/public.go @@ -65,7 +65,10 @@ func ChildBlockTypes(spec Spec) map[string]Spec { visit = func(s Spec) { if bs, ok := s.(blockSpec); ok { for _, blockS := range bs.blockHeaderSchemata() { - ret[blockS.Type] = bs.nestedSpec() + nested := bs.nestedSpec() + if nested != nil { // nil can be returned to dynamically opt out of this interface + ret[blockS.Type] = nested + } } } diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go index 25cafcd97b..f9da7f65bc 100644 --- a/vendor/github.com/hashicorp/hcl2/hcldec/spec.go +++ b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go @@ -3,6 +3,7 @@ package hcldec import ( "bytes" "fmt" + "sort" "github.com/hashicorp/hcl2/hcl" "github.com/zclconf/go-cty/cty" @@ -477,6 +478,44 @@ func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabe if len(elems) == 0 { ret = cty.ListValEmpty(s.Nested.impliedType()) } else { + // Since our target is a list, all of the decoded elements must have the + // same type or cty.ListVal will panic below. Different types can arise + // if there is an attribute spec of type cty.DynamicPseudoType in the + // nested spec; all given values must be convertable to a single type + // in order for the result to be considered valid. + etys := make([]cty.Type, len(elems)) + for i, v := range elems { + etys[i] = v.Type() + } + ety, convs := convert.UnifyUnsafe(etys) + if ety == cty.NilType { + // FIXME: This is a pretty terrible error message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), + Detail: "Corresponding attributes in all blocks of this type must be the same.", + Subject: &sourceRanges[0], + }) + return cty.DynamicVal, diags + } + for i, v := range elems { + if convs[i] != nil { + newV, err := convs[i](v) + if err != nil { + // FIXME: This is a pretty terrible error message. 
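+					// Conversion of this element to the unified type failed,
+					// so the diagnostic points at the specific block that
+					// produced the mismatched value.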
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), + Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), + Subject: &sourceRanges[i], + }) + // Bail early here so we won't panic below in cty.ListVal + return cty.DynamicVal, diags + } + elems[i] = newV + } + } + ret = cty.ListVal(elems) } @@ -508,6 +547,127 @@ func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []bloc return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) } +// A BlockTupleSpec is a Spec that produces a cty tuple of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +// +// This is similar to BlockListSpec, but it permits the nested blocks to have +// different result types in situations where cty.DynamicPseudoType attributes +// are present. +type BlockTupleSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockTupleSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockTupleSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockTupleSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockTupleSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockTupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockListSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) + elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.EmptyTupleVal + } else { + ret = cty.TupleVal(elems) + } + + return ret, diags +} + +func (s *BlockTupleSpec) impliedType() cty.Type { + // We can't predict our type, because we don't know how many blocks + // there will be until we decode. 
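+	// cty.DynamicPseudoType is the conventional placeholder meaning
+	// "type to be determined at decode time".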
+ return cty.DynamicPseudoType +} + +func (s *BlockTupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + // A BlockSetSpec is a Spec that produces a cty set of the results of // decoding all of the nested blocks of a given type, using a nested spec. type BlockSetSpec struct { @@ -592,6 +752,44 @@ func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel if len(elems) == 0 { ret = cty.SetValEmpty(s.Nested.impliedType()) } else { + // Since our target is a set, all of the decoded elements must have the + // same type or cty.SetVal will panic below. Different types can arise + // if there is an attribute spec of type cty.DynamicPseudoType in the + // nested spec; all given values must be convertable to a single type + // in order for the result to be considered valid. + etys := make([]cty.Type, len(elems)) + for i, v := range elems { + etys[i] = v.Type() + } + ety, convs := convert.UnifyUnsafe(etys) + if ety == cty.NilType { + // FIXME: This is a pretty terrible error message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), + Detail: "Corresponding attributes in all blocks of this type must be the same.", + Subject: &sourceRanges[0], + }) + return cty.DynamicVal, diags + } + for i, v := range elems { + if convs[i] != nil { + newV, err := convs[i](v) + if err != nil { + // FIXME: This is a pretty terrible error message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), + Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), + Subject: &sourceRanges[i], + }) + // Bail early here so we won't panic below in cty.ListVal + return cty.DynamicVal, diags + } + elems[i] = newV + } + } + ret = cty.SetVal(elems) } @@ -672,7 +870,10 @@ func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel var diags hcl.Diagnostics if s.Nested == nil { - panic("BlockSetSpec with no Nested Spec") + panic("BlockMapSpec with no Nested Spec") + } + if ImpliedType(s).HasDynamicTypes() { + panic("cty.DynamicPseudoType attributes may not be used inside a BlockMapSpec") } elems := map[string]interface{}{} @@ -765,6 +966,307 @@ func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []block return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) } +// A BlockObjectSpec is a Spec that produces a cty object of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +// +// One level of object structure is created for each of the given label names. +// There must be at least one given label name. +// +// This is similar to BlockMapSpec, but it permits the nested blocks to have +// different result types in situations where cty.DynamicPseudoType attributes +// are present. 
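+//
+// A hypothetical spec of this shape (illustrative only):
+//
+//	&BlockObjectSpec{
+//		TypeName:   "service",
+//		LabelNames: []string{"name"},
+//		Nested:     &AttrSpec{Name: "port", Type: cty.Number, Required: true},
+//	}
+//
+// decodes blocks like `service "web" { port = 8080 }` into a single object
+// with one attribute per distinct "name" label.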
+type BlockObjectSpec struct { + TypeName string + LabelNames []string + Nested Spec +} + +func (s *BlockObjectSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockObjectSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), + }, + } +} + +// blockSpec implementation +func (s *BlockObjectSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockObjectSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockObjectSpec with no Nested Spec") + } + + elems := map[string]interface{}{} + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + childLabels := labelsForBlock(childBlock) + val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) + targetMap := elems + for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { + if _, exists := targetMap[key]; !exists { + targetMap[key] = make(map[string]interface{}) + } + targetMap = targetMap[key].(map[string]interface{}) + } + + diags = append(diags, childDiags...) + + key := childBlock.Labels[len(s.LabelNames)-1] + if _, exists := targetMap[key]; exists { + labelsBuf := bytes.Buffer{} + for _, label := range childBlock.Labels { + fmt.Fprintf(&labelsBuf, " %q", label) + } + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block for %s%s was already defined. The %s labels must be unique.", + s.TypeName, labelsBuf.String(), s.TypeName, + ), + Subject: &childBlock.DefRange, + }) + continue + } + + targetMap[key] = val + } + + if len(elems) == 0 { + return cty.EmptyObjectVal, diags + } + + var ctyObj func(map[string]interface{}, int) cty.Value + ctyObj = func(raw map[string]interface{}, depth int) cty.Value { + vals := make(map[string]cty.Value, len(raw)) + if depth == 1 { + for k, v := range raw { + vals[k] = v.(cty.Value) + } + } else { + for k, v := range raw { + vals[k] = ctyObj(v.(map[string]interface{}), depth-1) + } + } + return cty.ObjectVal(vals) + } + + return ctyObj(elems, len(s.LabelNames)), diags +} + +func (s *BlockObjectSpec) impliedType() cty.Type { + // We can't predict our type, since we don't know how many blocks are + // present and what labels they have until we decode. + return cty.DynamicPseudoType +} + +func (s *BlockObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. 
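+	// (A range spanning from the first block to the last could include
+	// arbitrary unrelated content that happens to sit between them.)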
+ + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockAttrsSpec is a Spec that interprets a single block as if it were +// a map of some element type. That is, each attribute within the block +// becomes a key in the resulting map and the attribute's value becomes the +// element value, after conversion to the given element type. The resulting +// value is a cty.Map of the given element type. +// +// This spec imposes a validation constraint that there be exactly one block +// of the given type name and that this block may contain only attributes. The +// block does not accept any labels. +// +// This is an alternative to an AttrSpec of a map type for situations where +// block syntax is desired. Note that block syntax does not permit dynamic +// keys, construction of the result via a "for" expression, etc. In most cases +// an AttrSpec is preferred if the desired result is a map whose keys are +// chosen by the user rather than by schema. +type BlockAttrsSpec struct { + TypeName string + ElementType cty.Type + Required bool +} + +func (s *BlockAttrsSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// blockSpec implementation +func (s *BlockAttrsSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: nil, + }, + } +} + +// blockSpec implementation +func (s *BlockAttrsSpec) nestedSpec() Spec { + // This is an odd case: we aren't actually going to apply a nested spec + // in this case, since we're going to interpret the body directly as + // attributes, but we need to return something non-nil so that the + // decoder will recognize this as a block spec. We won't actually be + // using this for anything at decode time. + return noopSpec{} +} + +// specNeedingVariables implementation +func (s *BlockAttrsSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + + block, _ := s.findBlock(content) + if block == nil { + return nil + } + + var vars []hcl.Traversal + + attrs, diags := block.Body.JustAttributes() + if diags.HasErrors() { + return nil + } + + for _, attr := range attrs { + vars = append(vars, attr.Expr.Variables()...) + } + + // We'll return the variables references in source order so that any + // error messages that result are also in source order. + sort.Slice(vars, func(i, j int) bool { + return vars[i].SourceRange().Start.Byte < vars[j].SourceRange().Start.Byte + }) + + return vars +} + +func (s *BlockAttrsSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + block, other := s.findBlock(content) + if block == nil { + if s.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block of type %q is required here.", s.TypeName, + ), + Subject: &content.MissingItemRange, + }) + } + return cty.NullVal(cty.Map(s.ElementType)), diags + } + if other != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "Only one block of type %q is allowed. 
Previous definition was at %s.", + s.TypeName, block.DefRange.String(), + ), + Subject: &other.DefRange, + }) + } + + attrs, attrDiags := block.Body.JustAttributes() + diags = append(diags, attrDiags...) + + if len(attrs) == 0 { + return cty.MapValEmpty(s.ElementType), diags + } + + vals := make(map[string]cty.Value, len(attrs)) + for name, attr := range attrs { + attrVal, attrDiags := attr.Expr.Value(ctx) + diags = append(diags, attrDiags...) + + attrVal, err := convert.Convert(attrVal, s.ElementType) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute value", + Detail: fmt.Sprintf("Invalid value for attribute of %q block: %s.", s.TypeName, err), + Subject: attr.Expr.Range().Ptr(), + }) + attrVal = cty.UnknownVal(s.ElementType) + } + + vals[name] = attrVal + } + + return cty.MapVal(vals), diags +} + +func (s *BlockAttrsSpec) impliedType() cty.Type { + return cty.Map(s.ElementType) +} + +func (s *BlockAttrsSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + block, _ := s.findBlock(content) + if block == nil { + return content.MissingItemRange + } + return block.DefRange +} + +func (s *BlockAttrsSpec) findBlock(content *hcl.BodyContent) (block *hcl.Block, other *hcl.Block) { + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + if block != nil { + return block, candidate + } + block = candidate + } + + return block, nil +} + // A BlockLabelSpec is a Spec that returns a cty.String representing the // label of the block its given body belongs to, if indeed its given body // belongs to a block. It is a programming error to use this in a non-block @@ -848,6 +1350,16 @@ func findLabelSpecs(spec Spec) []string { // // The two specifications must have the same implied result type for correct // operation. If not, the result is undefined. +// +// Any requirements imposed by the "Default" spec apply even if "Primary" does +// not return null. For example, if the "Default" spec is for a required +// attribute then that attribute is always required, regardless of the result +// of the "Primary" spec. +// +// The "Default" spec must not describe a nested block, since otherwise the +// result of ChildBlockTypes would not be decidable without evaluation. If +// the default spec _does_ describe a nested block then the result is +// undefined. type DefaultSpec struct { Primary Spec Default Spec @@ -872,6 +1384,38 @@ func (s *DefaultSpec) impliedType() cty.Type { return s.Primary.impliedType() } +// attrSpec implementation +func (s *DefaultSpec) attrSchemata() []hcl.AttributeSchema { + // We must pass through the union of both of our nested specs so that + // we'll have both values available in the result. + var ret []hcl.AttributeSchema + if as, ok := s.Primary.(attrSpec); ok { + ret = append(ret, as.attrSchemata()...) + } + if as, ok := s.Default.(attrSpec); ok { + ret = append(ret, as.attrSchemata()...) + } + return ret +} + +// blockSpec implementation +func (s *DefaultSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + // Only the primary spec may describe a block, since otherwise + // our nestedSpec method below can't know which to return. 
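+	// A "Default" spec that itself described a block would make this
+	// choice ambiguous; per the DefaultSpec doc comment, that case is
+	// unsupported and its behavior is undefined.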
+ if bs, ok := s.Primary.(blockSpec); ok { + return bs.blockHeaderSchemata() + } + return nil +} + +// blockSpec implementation +func (s *DefaultSpec) nestedSpec() Spec { + if bs, ok := s.Primary.(blockSpec); ok { + return bs.nestedSpec() + } + return nil +} + func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { // We can't tell from here which of the two specs will ultimately be used // in our result, so we'll just assume the first. This is usually the right @@ -996,3 +1540,28 @@ func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels [] // not super-accurate, because there's nothing better to return. return s.Wrapped.sourceRange(content, blockLabels) } + +// noopSpec is a placeholder spec that does nothing, used in situations where +// a non-nil placeholder spec is required. It is not exported because there is +// no reason to use it directly; it is always an implementation detail only. +type noopSpec struct { +} + +func (s noopSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return cty.NullVal(cty.DynamicPseudoType), nil +} + +func (s noopSpec) impliedType() cty.Type { + return cty.DynamicPseudoType +} + +func (s noopSpec) visitSameBodyChildren(cb visitFunc) { + // nothing to do +} + +func (s noopSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // No useful range for a noopSpec, and nobody should be calling this anyway. + return hcl.Range{ + Filename: "noopSpec", + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go index 427b0d0e62..7662516cad 100644 --- a/vendor/github.com/hashicorp/hcl2/hcldec/variables.go +++ b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go @@ -15,20 +15,22 @@ import ( // be incomplete, but that's assumed to be okay because the eventual call // to Decode will produce error diagnostics anyway. func Variables(body hcl.Body, spec Spec) []hcl.Traversal { + var vars []hcl.Traversal schema := ImpliedSchema(spec) - content, _, _ := body.PartialContent(schema) - var vars []hcl.Traversal - if vs, ok := spec.(specNeedingVariables); ok { vars = append(vars, vs.variablesNeeded(content)...) } - spec.visitSameBodyChildren(func(s Spec) { + + var visitFn visitFunc + visitFn = func(s Spec) { if vs, ok := s.(specNeedingVariables); ok { vars = append(vars, vs.variablesNeeded(content)...) } - }) + s.visitSameBodyChildren(visitFn) + } + spec.visitSameBodyChildren(visitFn) return vars } diff --git a/vendor/github.com/hashicorp/hcl2/hcled/doc.go b/vendor/github.com/hashicorp/hcl2/hcled/doc.go new file mode 100644 index 0000000000..1a8014480c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcled/doc.go @@ -0,0 +1,4 @@ +// Package hcled provides functionality intended to help an application +// that embeds HCL to deliver relevant information to a text editor or IDE +// for navigating around and analyzing configuration files. +package hcled diff --git a/vendor/github.com/hashicorp/hcl2/hcled/navigation.go b/vendor/github.com/hashicorp/hcl2/hcled/navigation.go new file mode 100644 index 0000000000..5d10cd86ce --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcled/navigation.go @@ -0,0 +1,34 @@ +package hcled + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +type contextStringer interface { + ContextString(offset int) string +} + +// ContextString returns a string describing the context of the given byte +// offset, if available. 
An empty string is returned if no such information +// is available, or otherwise the returned string is in a form that depends +// on the language used to write the referenced file. +func ContextString(file *hcl.File, offset int) string { + if cser, ok := file.Nav.(contextStringer); ok { + return cser.ContextString(offset) + } + return "" +} + +type contextDefRanger interface { + ContextDefRange(offset int) hcl.Range +} + +func ContextDefRange(file *hcl.File, offset int) hcl.Range { + if cser, ok := file.Nav.(contextDefRanger); ok { + defRange := cser.ContextDefRange(offset) + if !defRange.Empty() { + return defRange + } + } + return file.Body.MissingItemRange() +} diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go new file mode 100644 index 0000000000..090416528d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go @@ -0,0 +1,121 @@ +package hclwrite + +import ( + "bytes" + "io" +) + +type File struct { + inTree + + srcBytes []byte + body *node +} + +// NewEmptyFile constructs a new file with no content, ready to be mutated +// by other calls that append to its body. +func NewEmptyFile() *File { + f := &File{ + inTree: newInTree(), + } + body := newBody() + f.body = f.children.Append(body) + return f +} + +// Body returns the root body of the file, which contains the top-level +// attributes and blocks. +func (f *File) Body() *Body { + return f.body.content.(*Body) +} + +// WriteTo writes the tokens underlying the receiving file to the given writer. +// +// The tokens first have a simple formatting pass applied that adjusts only +// the spaces between them. +func (f *File) WriteTo(wr io.Writer) (int64, error) { + tokens := f.inTree.children.BuildTokens(nil) + format(tokens) + return tokens.WriteTo(wr) +} + +// Bytes returns a buffer containing the source code resulting from the +// tokens underlying the receiving file. If any updates have been made via +// the AST API, these will be reflected in the result. 
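+//
+// A minimal usage sketch (the attribute name and value are invented):
+//
+//	f := NewEmptyFile()
+//	f.Body().SetAttributeValue("region", cty.StringVal("us-west-2"))
+//	src := f.Bytes() // roughly `region = "us-west-2"` plus a newline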
+func (f *File) Bytes() []byte { + buf := &bytes.Buffer{} + f.WriteTo(buf) + return buf.Bytes() +} + +type comments struct { + leafNode + + parent *node + tokens Tokens +} + +func newComments(tokens Tokens) *comments { + return &comments{ + tokens: tokens, + } +} + +func (c *comments) BuildTokens(to Tokens) Tokens { + return c.tokens.BuildTokens(to) +} + +type identifier struct { + leafNode + + parent *node + token *Token +} + +func newIdentifier(token *Token) *identifier { + return &identifier{ + token: token, + } +} + +func (i *identifier) BuildTokens(to Tokens) Tokens { + return append(to, i.token) +} + +func (i *identifier) hasName(name string) bool { + return name == string(i.token.Bytes) +} + +type number struct { + leafNode + + parent *node + token *Token +} + +func newNumber(token *Token) *number { + return &number{ + token: token, + } +} + +func (n *number) BuildTokens(to Tokens) Tokens { + return append(to, n.token) +} + +type quoted struct { + leafNode + + parent *node + tokens Tokens +} + +func newQuoted(tokens Tokens) *quoted { + return "ed{ + tokens: tokens, + } +} + +func (q *quoted) BuildTokens(to Tokens) Tokens { + return q.tokens.BuildTokens(to) +} diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go new file mode 100644 index 0000000000..975fa74280 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go @@ -0,0 +1,48 @@ +package hclwrite + +import ( + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +type Attribute struct { + inTree + + leadComments *node + name *node + expr *node + lineComments *node +} + +func newAttribute() *Attribute { + return &Attribute{ + inTree: newInTree(), + } +} + +func (a *Attribute) init(name string, expr *Expression) { + expr.assertUnattached() + + nameTok := newIdentToken(name) + nameObj := newIdentifier(nameTok) + a.leadComments = a.children.Append(newComments(nil)) + a.name = a.children.Append(nameObj) + a.children.AppendUnstructuredTokens(Tokens{ + { + Type: hclsyntax.TokenEqual, + Bytes: []byte{'='}, + }, + }) + a.expr = a.children.Append(expr) + a.expr.list = a.children + a.lineComments = a.children.Append(newComments(nil)) + a.children.AppendUnstructuredTokens(Tokens{ + { + Type: hclsyntax.TokenNewline, + Bytes: []byte{'\n'}, + }, + }) +} + +func (a *Attribute) Expr() *Expression { + return a.expr.content.(*Expression) +} diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go new file mode 100644 index 0000000000..d5fd32bd51 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go @@ -0,0 +1,74 @@ +package hclwrite + +import ( + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +type Block struct { + inTree + + leadComments *node + typeName *node + labels nodeSet + open *node + body *node + close *node +} + +func newBlock() *Block { + return &Block{ + inTree: newInTree(), + labels: newNodeSet(), + } +} + +// NewBlock constructs a new, empty block with the given type name and labels. 
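+//
+// For example, NewBlock("resource", []string{"aws_instance", "web"})
+// produces the tokens for an empty block equivalent to:
+//
+//	resource "aws_instance" "web" {
+//	}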
+func NewBlock(typeName string, labels []string) *Block { + block := newBlock() + block.init(typeName, labels) + return block +} + +func (b *Block) init(typeName string, labels []string) { + nameTok := newIdentToken(typeName) + nameObj := newIdentifier(nameTok) + b.leadComments = b.children.Append(newComments(nil)) + b.typeName = b.children.Append(nameObj) + for _, label := range labels { + labelToks := TokensForValue(cty.StringVal(label)) + labelObj := newQuoted(labelToks) + labelNode := b.children.Append(labelObj) + b.labels.Add(labelNode) + } + b.open = b.children.AppendUnstructuredTokens(Tokens{ + { + Type: hclsyntax.TokenOBrace, + Bytes: []byte{'{'}, + }, + { + Type: hclsyntax.TokenNewline, + Bytes: []byte{'\n'}, + }, + }) + body := newBody() // initially totally empty; caller can append to it subsequently + b.body = b.children.Append(body) + b.close = b.children.AppendUnstructuredTokens(Tokens{ + { + Type: hclsyntax.TokenCBrace, + Bytes: []byte{'}'}, + }, + { + Type: hclsyntax.TokenNewline, + Bytes: []byte{'\n'}, + }, + }) +} + +// Body returns the body that represents the content of the receiving block. +// +// Appending to or otherwise modifying this body will make changes to the +// tokens that are generated between the blocks open and close braces. +func (b *Block) Body() *Body { + return b.body.content.(*Body) +} diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go new file mode 100644 index 0000000000..cf69fee215 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go @@ -0,0 +1,153 @@ +package hclwrite + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +type Body struct { + inTree + + items nodeSet +} + +func newBody() *Body { + return &Body{ + inTree: newInTree(), + items: newNodeSet(), + } +} + +func (b *Body) appendItem(c nodeContent) *node { + nn := b.children.Append(c) + b.items.Add(nn) + return nn +} + +func (b *Body) appendItemNode(nn *node) *node { + nn.assertUnattached() + b.children.AppendNode(nn) + b.items.Add(nn) + return nn +} + +// Clear removes all of the items from the body, making it empty. +func (b *Body) Clear() { + b.children.Clear() +} + +func (b *Body) AppendUnstructuredTokens(ts Tokens) { + b.inTree.children.Append(ts) +} + +// Attributes returns a new map of all of the attributes in the body, with +// the attribute names as the keys. +func (b *Body) Attributes() map[string]*Attribute { + ret := make(map[string]*Attribute) + for n := range b.items { + if attr, isAttr := n.content.(*Attribute); isAttr { + nameObj := attr.name.content.(*identifier) + name := string(nameObj.token.Bytes) + ret[name] = attr + } + } + return ret +} + +// Blocks returns a new slice of all the blocks in the body. +func (b *Body) Blocks() []*Block { + ret := make([]*Block, 0, len(b.items)) + for n := range b.items { + if block, isBlock := n.content.(*Block); isBlock { + ret = append(ret, block) + } + } + return ret +} + +// GetAttribute returns the attribute from the body that has the given name, +// or returns nil if there is currently no matching attribute. +func (b *Body) GetAttribute(name string) *Attribute { + for n := range b.items { + if attr, isAttr := n.content.(*Attribute); isAttr { + nameObj := attr.name.content.(*identifier) + if nameObj.hasName(name) { + // We've found it! 
+ return attr + } + } + } + + return nil +} + +// SetAttributeValue either replaces the expression of an existing attribute +// of the given name or adds a new attribute definition to the end of the block. +// +// The value is given as a cty.Value, and must therefore be a literal. To set +// a variable reference or other traversal, use SetAttributeTraversal. +// +// The return value is the attribute that was either modified in-place or +// created. +func (b *Body) SetAttributeValue(name string, val cty.Value) *Attribute { + attr := b.GetAttribute(name) + expr := NewExpressionLiteral(val) + if attr != nil { + attr.expr = attr.expr.ReplaceWith(expr) + } else { + attr := newAttribute() + attr.init(name, expr) + b.appendItem(attr) + } + return attr +} + +// SetAttributeTraversal either replaces the expression of an existing attribute +// of the given name or adds a new attribute definition to the end of the body. +// +// The new expression is given as a hcl.Traversal, which must be an absolute +// traversal. To set a literal value, use SetAttributeValue. +// +// The return value is the attribute that was either modified in-place or +// created. +func (b *Body) SetAttributeTraversal(name string, traversal hcl.Traversal) *Attribute { + attr := b.GetAttribute(name) + expr := NewExpressionAbsTraversal(traversal) + if attr != nil { + attr.expr = attr.expr.ReplaceWith(expr) + } else { + attr := newAttribute() + attr.init(name, expr) + b.appendItem(attr) + } + return attr +} + +// AppendBlock appends an existing block (which must not be already attached +// to a body) to the end of the receiving body. +func (b *Body) AppendBlock(block *Block) *Block { + b.appendItem(block) + return block +} + +// AppendNewBlock appends a new nested block to the end of the receiving body +// with the given type name and labels. +func (b *Body) AppendNewBlock(typeName string, labels []string) *Block { + block := newBlock() + block.init(typeName, labels) + b.appendItem(block) + return block +} + +// AppendNewline appends a newline token to th end of the receiving body, +// which generally serves as a separator between different sets of body +// contents. +func (b *Body) AppendNewline() { + b.AppendUnstructuredTokens(Tokens{ + { + Type: hclsyntax.TokenNewline, + Bytes: []byte{'\n'}, + }, + }) +} diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go new file mode 100644 index 0000000000..62d89fbefc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go @@ -0,0 +1,201 @@ +package hclwrite + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +type Expression struct { + inTree + + absTraversals nodeSet +} + +func newExpression() *Expression { + return &Expression{ + inTree: newInTree(), + absTraversals: newNodeSet(), + } +} + +// NewExpressionLiteral constructs an an expression that represents the given +// literal value. +// +// Since an unknown value cannot be represented in source code, this function +// will panic if the given value is unknown or contains a nested unknown value. +// Use val.IsWhollyKnown before calling to be sure. +// +// HCL native syntax does not directly represent lists, maps, and sets, and +// instead relies on the automatic conversions to those collection types from +// either list or tuple constructor syntax. 
Therefore converting collection +// values to source code and re-reading them will lose type information, and +// the reader must provide a suitable type at decode time to recover the +// original value. +func NewExpressionLiteral(val cty.Value) *Expression { + toks := TokensForValue(val) + expr := newExpression() + expr.children.AppendUnstructuredTokens(toks) + return expr +} + +// NewExpressionAbsTraversal constructs an expression that represents the +// given traversal, which must be absolute or this function will panic. +func NewExpressionAbsTraversal(traversal hcl.Traversal) *Expression { + if traversal.IsRelative() { + panic("can't construct expression from relative traversal") + } + + physT := newTraversal() + rootName := traversal.RootName() + steps := traversal[1:] + + { + tn := newTraverseName() + tn.name = tn.children.Append(newIdentifier(&Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(rootName), + })) + physT.steps.Add(physT.children.Append(tn)) + } + + for _, step := range steps { + switch ts := step.(type) { + case hcl.TraverseAttr: + tn := newTraverseName() + tn.children.AppendUnstructuredTokens(Tokens{ + { + Type: hclsyntax.TokenDot, + Bytes: []byte{'.'}, + }, + }) + tn.name = tn.children.Append(newIdentifier(&Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(ts.Name), + })) + physT.steps.Add(physT.children.Append(tn)) + case hcl.TraverseIndex: + ti := newTraverseIndex() + ti.children.AppendUnstructuredTokens(Tokens{ + { + Type: hclsyntax.TokenOBrack, + Bytes: []byte{'['}, + }, + }) + indexExpr := NewExpressionLiteral(ts.Key) + ti.key = ti.children.Append(indexExpr) + ti.children.AppendUnstructuredTokens(Tokens{ + { + Type: hclsyntax.TokenCBrack, + Bytes: []byte{']'}, + }, + }) + physT.steps.Add(physT.children.Append(ti)) + } + } + + expr := newExpression() + expr.absTraversals.Add(expr.children.Append(physT)) + return expr +} + +// Variables returns the absolute traversals that exist within the receiving +// expression. +func (e *Expression) Variables() []*Traversal { + nodes := e.absTraversals.List() + ret := make([]*Traversal, len(nodes)) + for i, node := range nodes { + ret[i] = node.content.(*Traversal) + } + return ret +} + +// RenameVariablePrefix examines each of the absolute traversals in the +// receiving expression to see if they have the given sequence of names as +// a prefix prefix. If so, they are updated in place to have the given +// replacement names instead of that prefix. +// +// This can be used to implement symbol renaming. The calling application can +// visit all relevant expressions in its input and apply the same renaming +// to implement a global symbol rename. +// +// The search and replacement traversals must be the same length, or this +// method will panic. Only attribute access operations can be matched and +// replaced. Index steps never match the prefix. 
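+//
+// For example (names invented), a search of []string{"var", "a"} with a
+// replacement of []string{"var", "b"} rewrites a traversal like
+// `var.a.attr` in place to `var.b.attr`, while leaving `var.ab.attr`
+// untouched, since each step must match a whole name.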
+func (e *Expression) RenameVariablePrefix(search, replacement []string) {
+	if len(search) != len(replacement) {
+		panic(fmt.Sprintf("search and replacement length mismatch (%d and %d)", len(search), len(replacement)))
+	}
+Traversals:
+	for node := range e.absTraversals {
+		traversal := node.content.(*Traversal)
+		if len(traversal.steps) < len(search) {
+			// If it's shorter, then it can't have our prefix
+			continue
+		}
+
+		stepNodes := traversal.steps.List()
+		for i, name := range search {
+			step, isName := stepNodes[i].content.(*TraverseName)
+			if !isName {
+				continue Traversals // only name nodes can match
+			}
+			foundNameBytes := step.name.content.(*identifier).token.Bytes
+			if len(foundNameBytes) != len(name) {
+				continue Traversals
+			}
+			if string(foundNameBytes) != name {
+				continue Traversals
+			}
+		}
+
+		// If we get here then the prefix matched, so now we'll swap in
+		// the replacement strings.
+		for i, name := range replacement {
+			step := stepNodes[i].content.(*TraverseName)
+			token := step.name.content.(*identifier).token
+			token.Bytes = []byte(name)
+		}
+	}
+}
+
+// Traversal represents a sequence of variable, attribute, and/or index
+// operations.
+type Traversal struct {
+	inTree
+
+	steps nodeSet
+}
+
+func newTraversal() *Traversal {
+	return &Traversal{
+		inTree: newInTree(),
+		steps:  newNodeSet(),
+	}
+}
+
+type TraverseName struct {
+	inTree
+
+	name *node
+}
+
+func newTraverseName() *TraverseName {
+	return &TraverseName{
+		inTree: newInTree(),
+	}
+}
+
+type TraverseIndex struct {
+	inTree
+
+	key *node
+}
+
+func newTraverseIndex() *TraverseIndex {
+	return &TraverseIndex{
+		inTree: newInTree(),
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go b/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go
new file mode 100644
index 0000000000..56d5b77526
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go
@@ -0,0 +1,11 @@
+// Package hclwrite deals with the problem of generating HCL configuration
+// and of making specific surgical changes to existing HCL configurations.
+//
+// It operates at a different level of abstraction than the main HCL parser
+// and AST, since details such as the placement of comments and newlines
+// are preserved when unchanged.
+//
+// The hclwrite API follows a similar principle to XML/HTML DOM, allowing nodes
+// to be read out, created and inserted, etc. Nodes represent syntax constructs
+// rather than semantic concepts.
+package hclwrite
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/format.go b/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
new file mode 100644
index 0000000000..22b1919056
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
@@ -0,0 +1,492 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+var inKeyword = hclsyntax.Keyword([]byte{'i', 'n'})
+
+// placeholder token used when we don't have a token but we don't want
+// to pass a real "nil" and complicate things with nil pointer checks
+var nilToken = &Token{
+	Type:         hclsyntax.TokenNil,
+	Bytes:        []byte{},
+	SpacesBefore: 0,
+}
+
+// format rewrites tokens within the given sequence, in-place, to adjust the
+// whitespace around their content to achieve canonical formatting.
+func format(tokens Tokens) {
+	// Formatting is a multi-pass process.
More details on the passes below, + // but this is the overview: + // - adjust the leading space on each line to create appropriate + // indentation + // - adjust spaces between tokens in a single cell using a set of rules + // - adjust the leading space in the "assign" and "comment" cells on each + // line to vertically align with neighboring lines. + // All of these steps operate in-place on the given tokens, so a caller + // may collect a flat sequence of all of the tokens underlying an AST + // and pass it here and we will then indirectly modify the AST itself. + // Formatting must change only whitespace. Specifically, that means + // changing the SpacesBefore attribute on a token while leaving the + // other token attributes unchanged. + + lines := linesForFormat(tokens) + formatIndent(lines) + formatSpaces(lines) + formatCells(lines) +} + +func formatIndent(lines []formatLine) { + // Our methodology for indents is to take the input one line at a time + // and count the bracketing delimiters on each line. If a line has a net + // increase in open brackets, we increase the indent level by one and + // remember how many new openers we had. If the line has a net _decrease_, + // we'll compare it to the most recent number of openers and decrease the + // dedent level by one each time we pass an indent level remembered + // earlier. + // The "indent stack" used here allows for us to recognize degenerate + // input where brackets are not symmetrical within lines and avoid + // pushing things too far left or right, creating confusion. + + // We'll start our indent stack at a reasonable capacity to minimize the + // chance of us needing to grow it; 10 here means 10 levels of indent, + // which should be more than enough for reasonable HCL uses. + indents := make([]int, 0, 10) + + inHeredoc := false + for i := range lines { + line := &lines[i] + if len(line.lead) == 0 { + continue + } + + if inHeredoc { + for _, token := range line.lead { + if token.Type == hclsyntax.TokenCHeredoc { + inHeredoc = false + } + } + continue // don't touch indentation inside heredocs + } + + if line.lead[0].Type == hclsyntax.TokenNewline { + // Never place spaces before a newline + line.lead[0].SpacesBefore = 0 + continue + } + + netBrackets := 0 + for _, token := range line.lead { + netBrackets += tokenBracketChange(token) + if token.Type == hclsyntax.TokenOHeredoc { + inHeredoc = true + } + } + for _, token := range line.assign { + netBrackets += tokenBracketChange(token) + } + + switch { + case netBrackets > 0: + line.lead[0].SpacesBefore = 2 * len(indents) + indents = append(indents, netBrackets) + case netBrackets < 0: + closed := -netBrackets + for closed > 0 && len(indents) > 0 { + switch { + + case closed > indents[len(indents)-1]: + closed -= indents[len(indents)-1] + indents = indents[:len(indents)-1] + + case closed < indents[len(indents)-1]: + indents[len(indents)-1] -= closed + closed = 0 + + default: + indents = indents[:len(indents)-1] + closed = 0 + } + } + line.lead[0].SpacesBefore = 2 * len(indents) + default: + line.lead[0].SpacesBefore = 2 * len(indents) + } + } +} + +func formatSpaces(lines []formatLine) { + for _, line := range lines { + for i, token := range line.lead { + var before, after *Token + if i > 0 { + before = line.lead[i-1] + } else { + before = nilToken + } + if i < (len(line.lead) - 1) { + after = line.lead[i+1] + } else { + after = nilToken + } + if spaceAfterToken(token, before, after) { + after.SpacesBefore = 1 + } else { + after.SpacesBefore = 0 + } + } + for i, token := 
range line.assign { + if i == 0 { + // first token in "assign" always has one space before to + // separate the equals sign from what it's assigning. + token.SpacesBefore = 1 + } + + var before, after *Token + if i > 0 { + before = line.assign[i-1] + } else { + before = nilToken + } + if i < (len(line.assign) - 1) { + after = line.assign[i+1] + } else { + after = nilToken + } + if spaceAfterToken(token, before, after) { + after.SpacesBefore = 1 + } else { + after.SpacesBefore = 0 + } + } + + } +} + +func formatCells(lines []formatLine) { + + chainStart := -1 + maxColumns := 0 + + // We'll deal with the "assign" cell first, since moving that will + // also impact the "comment" cell. + closeAssignChain := func(i int) { + for _, chainLine := range lines[chainStart:i] { + columns := chainLine.lead.Columns() + spaces := (maxColumns - columns) + 1 + chainLine.assign[0].SpacesBefore = spaces + } + chainStart = -1 + maxColumns = 0 + } + for i, line := range lines { + if line.assign == nil { + if chainStart != -1 { + closeAssignChain(i) + } + } else { + if chainStart == -1 { + chainStart = i + } + columns := line.lead.Columns() + if columns > maxColumns { + maxColumns = columns + } + } + } + if chainStart != -1 { + closeAssignChain(len(lines)) + } + + // Now we'll deal with the comments + closeCommentChain := func(i int) { + for _, chainLine := range lines[chainStart:i] { + columns := chainLine.lead.Columns() + chainLine.assign.Columns() + spaces := (maxColumns - columns) + 1 + chainLine.comment[0].SpacesBefore = spaces + } + chainStart = -1 + maxColumns = 0 + } + for i, line := range lines { + if line.comment == nil { + if chainStart != -1 { + closeCommentChain(i) + } + } else { + if chainStart == -1 { + chainStart = i + } + columns := line.lead.Columns() + line.assign.Columns() + if columns > maxColumns { + maxColumns = columns + } + } + } + if chainStart != -1 { + closeCommentChain(len(lines)) + } + +} + +// spaceAfterToken decides whether a particular subject token should have a +// space after it when surrounded by the given before and after tokens. +// "before" can be TokenNil, if the subject token is at the start of a sequence. +func spaceAfterToken(subject, before, after *Token) bool { + switch { + + case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil: + // Never add spaces before a newline + return false + + case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen: + // Don't split a function name from open paren in a call + return false + + case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot: + // Don't use spaces around attribute access dots + return false + + case after.Type == hclsyntax.TokenComma: + // No space right before a comma in an argument list + return false + + case subject.Type == hclsyntax.TokenComma: + // Always a space after a comma + return true + + case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc: + // No extra spaces within templates + return false + + case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent: + // This is a special case for inside for expressions where a user + // might want to use a literal tuple constructor: + // [for x in [foo]: x] + // ... 
in that case, we would normally produce in[foo] thinking that + // in is a reference, but we'll recognize it as a keyword here instead + // to make the result less confusing. + return true + + case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0): + return false + + case subject.Type == hclsyntax.TokenMinus: + // Since a minus can either be subtraction or negation, and the latter + // should _not_ have a space after it, we need to use some heuristics + // to decide which case this is. + // We guess that we have a negation if the token before doesn't look + // like it could be the end of an expression. + + switch before.Type { + + case hclsyntax.TokenNil: + // Minus at the start of input must be a negation + return false + + case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion: + // Minus immediately after an opening bracket or separator must be a negation. + return false + + case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus: + // Minus immediately after another arithmetic operator must be negation. + return false + + case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq: + // Minus immediately after another comparison operator must be negation. + return false + + case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang: + // Minus immediately after logical operator doesn't make sense but probably intended as negation. + return false + + default: + return true + } + + case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace: + // Unlike other bracket types, braces have spaces on both sides of them, + // both in single-line nested blocks foo { bar = baz } and in object + // constructor expressions foo = { bar = baz }. + if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace { + // An open brace followed by a close brace is an exception, however. + // e.g. foo {} rather than foo { } + return false + } + return true + + // In the unlikely event that an interpolation expression is just + // a single object constructor, we'll put a space between the ${ and + // the following { to make this more obvious, and then the same + // thing for the two braces at the end. + case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace: + return true + case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd: + return true + + // Don't add spaces between interpolated items + case subject.Type == hclsyntax.TokenTemplateSeqEnd && after.Type == hclsyntax.TokenTemplateInterp: + return false + + case tokenBracketChange(subject) > 0: + // No spaces after open brackets + return false + + case tokenBracketChange(after) < 0: + // No spaces before close brackets + return false + + default: + // Most tokens are space-separated + return true + + } +} + +func linesForFormat(tokens Tokens) []formatLine { + if len(tokens) == 0 { + return make([]formatLine, 0) + } + + // first we'll count our lines, so we can allocate the array for them in + // a single block. 
(We want to minimize memory pressure in this codepath, + // so it can be run somewhat-frequently by editor integrations.) + lineCount := 1 // if there are zero newlines then there is one line + for _, tok := range tokens { + if tokenIsNewline(tok) { + lineCount++ + } + } + + // To start, we'll just put everything in the "lead" cell on each line, + // and then do another pass over the lines afterwards to adjust. + lines := make([]formatLine, lineCount) + li := 0 + lineStart := 0 + for i, tok := range tokens { + if tok.Type == hclsyntax.TokenEOF { + // The EOF token doesn't belong to any line, and terminates the + // token sequence. + lines[li].lead = tokens[lineStart:i] + break + } + + if tokenIsNewline(tok) { + lines[li].lead = tokens[lineStart : i+1] + lineStart = i + 1 + li++ + } + } + + // If a set of tokens doesn't end in TokenEOF (e.g. because it's a + // fragment of tokens from the middle of a file) then we might fall + // out here with a line still pending. + if lineStart < len(tokens) { + lines[li].lead = tokens[lineStart:] + if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF { + lines[li].lead = lines[li].lead[:len(lines[li].lead)-1] + } + } + + // Now we'll pick off any trailing comments and attribute assignments + // to shuffle off into the "comment" and "assign" cells. + inHeredoc := false + for i := range lines { + line := &lines[i] + if len(line.lead) == 0 { + // if the line is empty then there's nothing for us to do + // (this should happen only for the final line, because all other + // lines would have a newline token of some kind) + continue + } + + if inHeredoc { + for _, tok := range line.lead { + if tok.Type == hclsyntax.TokenCHeredoc { + inHeredoc = false + break + } + } + // Inside a heredoc everything is "lead", even if there's a + // template interpolation embedded in there that might otherwise + // confuse our logic below. + continue + } + + for _, tok := range line.lead { + if tok.Type == hclsyntax.TokenOHeredoc { + inHeredoc = true + break + } + } + + if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment { + line.comment = line.lead[len(line.lead)-1:] + line.lead = line.lead[:len(line.lead)-1] + } + + for i, tok := range line.lead { + if i > 0 && tok.Type == hclsyntax.TokenEqual { + // We only move the tokens into "assign" if the RHS seems to + // be a whole expression, which we determine by counting + // brackets. If there's a net positive number of brackets + // then that suggests we're introducing a multi-line expression. + netBrackets := 0 + for _, token := range line.lead[i:] { + netBrackets += tokenBracketChange(token) + } + + if netBrackets == 0 { + line.assign = line.lead[i:] + line.lead = line.lead[:i] + } + break + } + } + } + + return lines +} + +func tokenIsNewline(tok *Token) bool { + if tok.Type == hclsyntax.TokenNewline { + return true + } else if tok.Type == hclsyntax.TokenComment { + // Single line tokens (# and //) consume their terminating newline, + // so we need to treat them as newline tokens as well. 
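+		// e.g. "# foo\n" is a single token that is both the comment and
+		// the line terminator.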
+		if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
+			return true
+		}
+	}
+	return false
+}
+
+func tokenBracketChange(tok *Token) int {
+	switch tok.Type {
+	case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp:
+		return 1
+	case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd:
+		return -1
+	default:
+		return 0
+	}
+}
+
+// formatLine represents a single line of source code for formatting purposes,
+// splitting its tokens into up to three "cells":
+//
+// lead: always present, representing everything up to one of the others
+// assign: if line contains an attribute assignment, represents the tokens
+// starting at (and including) the equals symbol
+// comment: if line contains any non-comment tokens and ends with a
+// single-line comment token, represents the comment.
+//
+// When formatting, the leading spaces of the first tokens in each of these
+// cells are adjusted to vertically align their occurrences on consecutive
+// rows.
+type formatLine struct {
+	lead    Tokens
+	assign  Tokens
+	comment Tokens
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go b/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go
new file mode 100644
index 0000000000..d249cfdf9a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go
@@ -0,0 +1,250 @@
+package hclwrite
+
+import (
+	"fmt"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// TokensForValue returns a sequence of tokens that represents the given
+// constant value.
+//
+// This function only supports types that are used by HCL. In particular, it
+// does not support capsule types and will panic if given one.
+//
+// It is not possible to express an unknown value in source code, so this
+// function will panic if the given value is unknown or contains any unknown
+// values. A caller can call the value's IsWhollyKnown method to verify that
+// no unknown values are present before calling TokensForValue.
+func TokensForValue(val cty.Value) Tokens {
+	toks := appendTokensForValue(val, nil)
+	format(toks) // fiddle with the SpacesBefore field to get canonical spacing
+	return toks
+}
+
+// TokensForTraversal returns a sequence of tokens that represents the given
+// traversal.
+//
+// If the traversal is absolute then the result is a self-contained, valid
+// reference expression. If the traversal is relative then the returned tokens
+// could be appended to some other expression tokens to traverse into the
+// represented expression.
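+//
+// For example, a caller could build tokens for a reference such as
+// aws_instance.example.id (the names here are only illustrative):
+//
+//     toks := TokensForTraversal(hcl.Traversal{
+//         hcl.TraverseRoot{Name: "aws_instance"},
+//         hcl.TraverseAttr{Name: "example"},
+//         hcl.TraverseAttr{Name: "id"},
+//     })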
+func TokensForTraversal(traversal hcl.Traversal) Tokens {
+	toks := appendTokensForTraversal(traversal, nil)
+	format(toks) // fiddle with the SpacesBefore field to get canonical spacing
+	return toks
+}
+
+func appendTokensForValue(val cty.Value, toks Tokens) Tokens {
+	switch {
+
+	case !val.IsKnown():
+		panic("cannot produce tokens for unknown value")
+
+	case val.IsNull():
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenIdent,
+			Bytes: []byte(`null`),
+		})
+
+	case val.Type() == cty.Bool:
+		var src []byte
+		if val.True() {
+			src = []byte(`true`)
+		} else {
+			src = []byte(`false`)
+		}
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenIdent,
+			Bytes: src,
+		})
+
+	case val.Type() == cty.Number:
+		bf := val.AsBigFloat()
+		srcStr := bf.Text('f', -1)
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenNumberLit,
+			Bytes: []byte(srcStr),
+		})
+
+	case val.Type() == cty.String:
+		// TODO: If it's a multi-line string ending in a newline, format
+		// it as a HEREDOC instead.
+		src := escapeQuotedStringLit(val.AsString())
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOQuote,
+			Bytes: []byte{'"'},
+		})
+		if len(src) > 0 {
+			toks = append(toks, &Token{
+				Type:  hclsyntax.TokenQuotedLit,
+				Bytes: src,
+			})
+		}
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCQuote,
+			Bytes: []byte{'"'},
+		})
+
+	case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType():
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOBrack,
+			Bytes: []byte{'['},
+		})
+
+		i := 0
+		for it := val.ElementIterator(); it.Next(); {
+			if i > 0 {
+				toks = append(toks, &Token{
+					Type:  hclsyntax.TokenComma,
+					Bytes: []byte{','},
+				})
+			}
+			_, eVal := it.Element()
+			toks = appendTokensForValue(eVal, toks)
+			i++
+		}
+
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCBrack,
+			Bytes: []byte{']'},
+		})
+
+	case val.Type().IsMapType() || val.Type().IsObjectType():
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOBrace,
+			Bytes: []byte{'{'},
+		})
+
+		i := 0
+		for it := val.ElementIterator(); it.Next(); {
+			if i > 0 {
+				toks = append(toks, &Token{
+					Type:  hclsyntax.TokenComma,
+					Bytes: []byte{','},
+				})
+			}
+			eKey, eVal := it.Element()
+			if hclsyntax.ValidIdentifier(eKey.AsString()) {
+				toks = append(toks, &Token{
+					Type:  hclsyntax.TokenIdent,
+					Bytes: []byte(eKey.AsString()),
+				})
+			} else {
+				toks = appendTokensForValue(eKey, toks)
+			}
+			toks = append(toks, &Token{
+				Type:  hclsyntax.TokenEqual,
+				Bytes: []byte{'='},
+			})
+			toks = appendTokensForValue(eVal, toks)
+			i++
+		}
+
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCBrace,
+			Bytes: []byte{'}'},
+		})
+
+	default:
+		panic(fmt.Sprintf("cannot produce tokens for %#v", val))
+	}
+
+	return toks
+}
+
+func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens {
+	for _, step := range traversal {
+		// Capture the append result; discarding it would lose every token
+		// the step produced.
+		toks = appendTokensForTraversalStep(step, toks)
+	}
+	return toks
+}
+
+func appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) Tokens {
+	switch ts := step.(type) {
+	case hcl.TraverseRoot:
+		return append(toks, &Token{
+			Type:  hclsyntax.TokenIdent,
+			Bytes: []byte(ts.Name),
+		})
+	case hcl.TraverseAttr:
+		return append(
+			toks,
+			&Token{
+				Type:  hclsyntax.TokenDot,
+				Bytes: []byte{'.'},
+			},
+			&Token{
+				Type:  hclsyntax.TokenIdent,
+				Bytes: []byte(ts.Name),
+			},
+		)
+	case hcl.TraverseIndex:
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOBrack,
+			Bytes: []byte{'['},
+		})
+		toks = appendTokensForValue(ts.Key, toks)
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCBrack,
+			Bytes: []byte{']'},
+		})
+		return toks
+	default:
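+		// Any other traverser type is a bug in the caller, so fail loudly
+		// rather than silently emitting incomplete tokens.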
+		panic(fmt.Sprintf("unsupported traversal step type %T", step))
+	}
+}
+
+func escapeQuotedStringLit(s string) []byte {
+	if len(s) == 0 {
+		return nil
+	}
+	buf := make([]byte, 0, len(s))
+	for i, r := range s {
+		switch r {
+		case '\n':
+			buf = append(buf, '\\', 'n')
+		case '\r':
+			buf = append(buf, '\\', 'r')
+		case '\t':
+			buf = append(buf, '\\', 't')
+		case '"':
+			buf = append(buf, '\\', '"')
+		case '\\':
+			buf = append(buf, '\\', '\\')
+		case '$', '%':
+			buf = appendRune(buf, r)
+			remain := s[i+1:]
+			if len(remain) > 0 && remain[0] == '{' {
+				// Double up our template introducer symbol to escape it.
+				buf = appendRune(buf, r)
+			}
+		default:
+			if !unicode.IsPrint(r) {
+				var fmted string
+				if r < 65536 {
+					fmted = fmt.Sprintf("\\u%04x", r)
+				} else {
+					fmted = fmt.Sprintf("\\U%08x", r)
+				}
+				buf = append(buf, fmted...)
+			} else {
+				buf = appendRune(buf, r)
+			}
+		}
+	}
+	return buf
+}
+
+func appendRune(b []byte, r rune) []byte {
+	l := utf8.RuneLen(r)
+	for i := 0; i < l; i++ {
+		b = append(b, 0) // make room at the end of our buffer
+	}
+	ch := b[len(b)-l:]
+	utf8.EncodeRune(ch, r)
+	return b
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go b/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
new file mode 100644
index 0000000000..a13c0ec419
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
@@ -0,0 +1,23 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+type nativeNodeSorter struct {
+	Nodes []hclsyntax.Node
+}
+
+func (s nativeNodeSorter) Len() int {
+	return len(s.Nodes)
+}
+
+func (s nativeNodeSorter) Less(i, j int) bool {
+	rangeI := s.Nodes[i].Range()
+	rangeJ := s.Nodes[j].Range()
+	return rangeI.Start.Byte < rangeJ.Start.Byte
+}
+
+func (s nativeNodeSorter) Swap(i, j int) {
+	s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i]
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/node.go b/vendor/github.com/hashicorp/hcl2/hclwrite/node.go
new file mode 100644
index 0000000000..71fd00faf6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/node.go
@@ -0,0 +1,236 @@
+package hclwrite
+
+import (
+	"fmt"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+// node represents a node in the AST.
+type node struct {
+	content nodeContent
+
+	list          *nodes
+	before, after *node
+}
+
+func newNode(c nodeContent) *node {
+	return &node{
+		content: c,
+	}
+}
+
+func (n *node) Equal(other *node) bool {
+	return cmp.Equal(n.content, other.content)
+}
+
+func (n *node) BuildTokens(to Tokens) Tokens {
+	return n.content.BuildTokens(to)
+}
+
+// Detach removes the receiver from the list it currently belongs to. If the
+// node is not currently in a list, this is a no-op.
+func (n *node) Detach() {
+	if n.list == nil {
+		return
+	}
+	if n.before != nil {
+		n.before.after = n.after
+	}
+	if n.after != nil {
+		n.after.before = n.before
+	}
+	if n.list.first == n {
+		n.list.first = n.after
+	}
+	if n.list.last == n {
+		n.list.last = n.before
+	}
+	n.list = nil
+	n.before = nil
+	n.after = nil
+}
+
+// ReplaceWith removes the receiver from the list it currently belongs to and
+// inserts a new node with the given content in its place. If the node is not
+// currently in a list, this function will panic.
+//
+// The return value is the newly-constructed node, containing the given content.
+// After this function returns, the receiver is no longer attached to a list.
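+//
+// For example, Body.SetAttributeValue relies on this to swap an existing
+// attribute's expression node for a newly-built literal expression in place.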
+func (n *node) ReplaceWith(c nodeContent) *node { + if n.list == nil { + panic("can't replace node that is not in a list") + } + + before := n.before + after := n.after + list := n.list + n.before, n.after, n.list = nil, nil, nil + + nn := newNode(c) + nn.before = before + nn.after = after + nn.list = list + if before != nil { + before.after = nn + } + if after != nil { + after.before = nn + } + return nn +} + +func (n *node) assertUnattached() { + if n.list != nil { + panic(fmt.Sprintf("attempt to attach already-attached node %#v", n)) + } +} + +// nodeContent is the interface type implemented by all AST content types. +type nodeContent interface { + walkChildNodes(w internalWalkFunc) + BuildTokens(to Tokens) Tokens +} + +// nodes is a list of nodes. +type nodes struct { + first, last *node +} + +func (ns *nodes) BuildTokens(to Tokens) Tokens { + for n := ns.first; n != nil; n = n.after { + to = n.BuildTokens(to) + } + return to +} + +func (ns *nodes) Clear() { + ns.first = nil + ns.last = nil +} + +func (ns *nodes) Append(c nodeContent) *node { + n := &node{ + content: c, + } + ns.AppendNode(n) + n.list = ns + return n +} + +func (ns *nodes) AppendNode(n *node) { + if ns.last != nil { + n.before = ns.last + ns.last.after = n + } + n.list = ns + ns.last = n + if ns.first == nil { + ns.first = n + } +} + +func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node { + if len(tokens) == 0 { + return nil + } + n := newNode(tokens) + ns.AppendNode(n) + n.list = ns + return n +} + +// nodeSet is an unordered set of nodes. It is used to describe a set of nodes +// that all belong to the same list that have some role or characteristic +// in common. +type nodeSet map[*node]struct{} + +func newNodeSet() nodeSet { + return make(nodeSet) +} + +func (ns nodeSet) Has(n *node) bool { + if ns == nil { + return false + } + _, exists := ns[n] + return exists +} + +func (ns nodeSet) Add(n *node) { + ns[n] = struct{}{} +} + +func (ns nodeSet) Remove(n *node) { + delete(ns, n) +} + +func (ns nodeSet) List() []*node { + if len(ns) == 0 { + return nil + } + + ret := make([]*node, 0, len(ns)) + + // Determine which list we are working with. We assume here that all of + // the nodes belong to the same list, since that is part of the contract + // for nodeSet. + var list *nodes + for n := range ns { + list = n.list + break + } + + // We recover the order by iterating over the whole list. This is not + // the most efficient way to do it, but our node lists should always be + // small so not worth making things more complex. + for n := list.first; n != nil; n = n.after { + if ns.Has(n) { + ret = append(ret, n) + } + } + return ret +} + +type internalWalkFunc func(*node) + +// inTree can be embedded into a content struct that has child nodes to get +// a standard implementation of the NodeContent interface and a record of +// a potential parent node. 
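+//
+// For example, Body, Block, Attribute and Expression all embed inTree,
+// which is what lets them participate in the node tree and build their
+// tokens recursively.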
+type inTree struct { + parent *node + children *nodes +} + +func newInTree() inTree { + return inTree{ + children: &nodes{}, + } +} + +func (it *inTree) assertUnattached() { + if it.parent != nil { + panic(fmt.Sprintf("node is already attached to %T", it.parent.content)) + } +} + +func (it *inTree) walkChildNodes(w internalWalkFunc) { + for n := it.children.first; n != nil; n = n.after { + w(n) + } +} + +func (it *inTree) BuildTokens(to Tokens) Tokens { + for n := it.children.first; n != nil; n = n.after { + to = n.BuildTokens(to) + } + return to +} + +// leafNode can be embedded into a content struct to give it a do-nothing +// implementation of walkChildNodes +type leafNode struct { +} + +func (n *leafNode) walkChildNodes(w internalWalkFunc) { +} diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go b/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go new file mode 100644 index 0000000000..1876818fd8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go @@ -0,0 +1,594 @@ +package hclwrite + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// Our "parser" here is actually not doing any parsing of its own. Instead, +// it leans on the native parser in hclsyntax, and then uses the source ranges +// from the AST to partition the raw token sequence to match the raw tokens +// up to AST nodes. +// +// This strategy feels somewhat counter-intuitive, since most of the work the +// parser does is thrown away here, but this strategy is chosen because the +// normal parsing work done by hclsyntax is considered to be the "main case", +// while modifying and re-printing source is more of an edge case, used only +// in ancillary tools, and so it's good to keep all the main parsing logic +// with the main case but keep all of the extra complexity of token wrangling +// out of the main parser, which is already rather complex just serving the +// use-cases it already serves. +// +// If the parsing step produces any errors, the returned File is nil because +// we can't reliably extract tokens from the partial AST produced by an +// erroneous parse. +func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { + file, diags := hclsyntax.ParseConfig(src, filename, start) + if diags.HasErrors() { + return nil, diags + } + + // To do our work here, we use the "native" tokens (those from hclsyntax) + // to match against source ranges in the AST, but ultimately produce + // slices from our sequence of "writer" tokens, which contain only + // *relative* position information that is more appropriate for + // transformation/writing use-cases. + nativeTokens, diags := hclsyntax.LexConfig(src, filename, start) + if diags.HasErrors() { + // should never happen, since we would've caught these diags in + // the first call above. 
+		return nil, diags
+	}
+	writerTokens := writerTokens(nativeTokens)
+
+	from := inputTokens{
+		nativeTokens: nativeTokens,
+		writerTokens: writerTokens,
+	}
+
+	before, root, after := parseBody(file.Body.(*hclsyntax.Body), from)
+	ret := &File{
+		inTree: newInTree(),
+
+		srcBytes: src,
+		body:     root,
+	}
+
+	nodes := ret.inTree.children
+	nodes.Append(before.Tokens())
+	nodes.AppendNode(root)
+	nodes.Append(after.Tokens())
+
+	return ret, diags
+}
+
+type inputTokens struct {
+	nativeTokens hclsyntax.Tokens
+	writerTokens Tokens
+}
+
+func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) {
+	start, end := partitionTokens(it.nativeTokens, rng)
+	before = it.Slice(0, start)
+	within = it.Slice(start, end)
+	after = it.Slice(end, len(it.nativeTokens))
+	return
+}
+
+func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) {
+	for i, t := range it.writerTokens {
+		if t.Type == ty {
+			return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens))
+		}
+	}
+	panic(fmt.Sprintf("didn't find any token of type %s", ty))
+}
+
+func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) {
+	before, within, after := it.PartitionType(ty)
+	if within.Len() != 1 {
+		panic("PartitionType found more than one token")
+	}
+	return before, within.Tokens()[0], after
+}
+
+// PartitionIncludingComments is like Partition except the returned "within"
+// range includes any lead and line comments associated with the range.
+func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) {
+	start, end := partitionTokens(it.nativeTokens, rng)
+	start = partitionLeadCommentTokens(it.nativeTokens[:start])
+	_, afterNewline := partitionLineEndTokens(it.nativeTokens[end:])
+	end += afterNewline
+
+	before = it.Slice(0, start)
+	within = it.Slice(start, end)
+	after = it.Slice(end, len(it.nativeTokens))
+	return
+}
+
+// PartitionBlockItem is similar to PartitionIncludingComments but it returns
+// the comments as separate token sequences so that they can be captured into
+// AST attributes. It makes assumptions that apply only to block items, so
+// should not be used for other constructs.
+func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) {
+	before, within, after = it.Partition(rng)
+	before, leadComments = before.PartitionLeadComments()
+	lineComments, newline, after = after.PartitionLineEndTokens()
+	return
+}
+
+func (it inputTokens) PartitionLeadComments() (before, within inputTokens) {
+	start := partitionLeadCommentTokens(it.nativeTokens)
+	before = it.Slice(0, start)
+	within = it.Slice(start, len(it.nativeTokens))
+	return
+}
+
+func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) {
+	afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens)
+	comments = it.Slice(0, afterComments)
+	newline = it.Slice(afterComments, afterNewline)
+	after = it.Slice(afterNewline, len(it.nativeTokens))
+	return
+}
+
+func (it inputTokens) Slice(start, end int) inputTokens {
+	// When we slice, we create a new slice with no additional capacity because
+	// we expect that these slices will be mutated in order to insert
+	// new code into the AST, and we want to ensure that a new underlying
+	// array gets allocated in that case, rather than writing into some
+	// following slice and corrupting it.
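+	// The three-index slice expressions below set capacity equal to length,
+	// so any subsequent append will copy into a fresh backing array.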
+ return inputTokens{ + nativeTokens: it.nativeTokens[start:end:end], + writerTokens: it.writerTokens[start:end:end], + } +} + +func (it inputTokens) Len() int { + return len(it.nativeTokens) +} + +func (it inputTokens) Tokens() Tokens { + return it.writerTokens +} + +func (it inputTokens) Types() []hclsyntax.TokenType { + ret := make([]hclsyntax.TokenType, len(it.nativeTokens)) + for i, tok := range it.nativeTokens { + ret[i] = tok.Type + } + return ret +} + +// parseBody locates the given body within the given input tokens and returns +// the resulting *Body object as well as the tokens that appeared before and +// after it. +func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) { + before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange) + + // The main AST doesn't retain the original source ordering of the + // body items, so we need to reconstruct that ordering by inspecting + // their source ranges. + nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks)) + for _, nativeAttr := range nativeBody.Attributes { + nativeItems = append(nativeItems, nativeAttr) + } + for _, nativeBlock := range nativeBody.Blocks { + nativeItems = append(nativeItems, nativeBlock) + } + sort.Sort(nativeNodeSorter{nativeItems}) + + body := &Body{ + inTree: newInTree(), + items: newNodeSet(), + } + + remain := within + for _, nativeItem := range nativeItems { + beforeItem, item, afterItem := parseBodyItem(nativeItem, remain) + + if beforeItem.Len() > 0 { + body.AppendUnstructuredTokens(beforeItem.Tokens()) + } + body.appendItemNode(item) + + remain = afterItem + } + + if remain.Len() > 0 { + body.AppendUnstructuredTokens(remain.Tokens()) + } + + return before, newNode(body), after +} + +func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) { + before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range()) + + var item *node + + switch tItem := nativeItem.(type) { + case *hclsyntax.Attribute: + item = parseAttribute(tItem, within, leadComments, lineComments, newline) + case *hclsyntax.Block: + item = parseBlock(tItem, within, leadComments, lineComments, newline) + default: + // should never happen if caller is behaving + panic("unsupported native item type") + } + + return before, item, after +} + +func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node { + attr := &Attribute{ + inTree: newInTree(), + } + children := attr.inTree.children + + { + cn := newNode(newComments(leadComments.Tokens())) + attr.leadComments = cn + children.AppendNode(cn) + } + + before, nameTokens, from := from.Partition(nativeAttr.NameRange) + { + children.AppendUnstructuredTokens(before.Tokens()) + if nameTokens.Len() != 1 { + // Should never happen with valid input + panic("attribute name is not exactly one token") + } + token := nameTokens.Tokens()[0] + in := newNode(newIdentifier(token)) + attr.name = in + children.AppendNode(in) + } + + before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendUnstructuredTokens(equalsTokens.Tokens()) + + before, exprTokens, from := from.Partition(nativeAttr.Expr.Range()) + { + children.AppendUnstructuredTokens(before.Tokens()) + exprNode := parseExpression(nativeAttr.Expr, exprTokens) + attr.expr = exprNode + children.AppendNode(exprNode) + } + + { + cn := 
newNode(newComments(lineComments.Tokens())) + attr.lineComments = cn + children.AppendNode(cn) + } + + children.AppendUnstructuredTokens(newline.Tokens()) + + // Collect any stragglers, though there shouldn't be any + children.AppendUnstructuredTokens(from.Tokens()) + + return newNode(attr) +} + +func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node { + block := &Block{ + inTree: newInTree(), + labels: newNodeSet(), + } + children := block.inTree.children + + { + cn := newNode(newComments(leadComments.Tokens())) + block.leadComments = cn + children.AppendNode(cn) + } + + before, typeTokens, from := from.Partition(nativeBlock.TypeRange) + { + children.AppendUnstructuredTokens(before.Tokens()) + if typeTokens.Len() != 1 { + // Should never happen with valid input + panic("block type name is not exactly one token") + } + token := typeTokens.Tokens()[0] + in := newNode(newIdentifier(token)) + block.typeName = in + children.AppendNode(in) + } + + for _, rng := range nativeBlock.LabelRanges { + var labelTokens inputTokens + before, labelTokens, from = from.Partition(rng) + children.AppendUnstructuredTokens(before.Tokens()) + tokens := labelTokens.Tokens() + ln := newNode(newQuoted(tokens)) + block.labels.Add(ln) + children.AppendNode(ln) + } + + before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendUnstructuredTokens(oBrace.Tokens()) + + // We go a bit out of order here: we go hunting for the closing brace + // so that we have a delimited body, but then we'll deal with the body + // before we actually append the closing brace and any straggling tokens + // that appear after it. + bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange) + before, body, after := parseBody(nativeBlock.Body, bodyTokens) + children.AppendUnstructuredTokens(before.Tokens()) + block.body = body + children.AppendNode(body) + children.AppendUnstructuredTokens(after.Tokens()) + + children.AppendUnstructuredTokens(cBrace.Tokens()) + + // stragglers + children.AppendUnstructuredTokens(from.Tokens()) + if lineComments.Len() > 0 { + // blocks don't actually have line comments, so we'll just treat + // them as extra stragglers + children.AppendUnstructuredTokens(lineComments.Tokens()) + } + children.AppendUnstructuredTokens(newline.Tokens()) + + return newNode(block) +} + +func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node { + expr := newExpression() + children := expr.inTree.children + + nativeVars := nativeExpr.Variables() + + for _, nativeTraversal := range nativeVars { + before, traversal, after := parseTraversal(nativeTraversal, from) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendNode(traversal) + expr.absTraversals.Add(traversal) + from = after + } + // Attach any stragglers that don't belong to a traversal to the expression + // itself. In an expression with no traversals at all, this is just the + // entirety of "from". 
+ children.AppendUnstructuredTokens(from.Tokens()) + + return newNode(expr) +} + +func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) { + traversal := newTraversal() + children := traversal.inTree.children + before, from, after = from.Partition(nativeTraversal.SourceRange()) + + stepAfter := from + for _, nativeStep := range nativeTraversal { + before, step, after := parseTraversalStep(nativeStep, stepAfter) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendNode(step) + traversal.steps.Add(step) + stepAfter = after + } + + return before, newNode(traversal), after +} + +func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) { + var children *nodes + switch tNativeStep := nativeStep.(type) { + + case hcl.TraverseRoot, hcl.TraverseAttr: + step := newTraverseName() + children = step.inTree.children + before, from, after = from.Partition(nativeStep.SourceRange()) + inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent) + name := newIdentifier(token) + children.AppendUnstructuredTokens(inBefore.Tokens()) + step.name = children.Append(name) + children.AppendUnstructuredTokens(inAfter.Tokens()) + return before, newNode(step), after + + case hcl.TraverseIndex: + step := newTraverseIndex() + children = step.inTree.children + before, from, after = from.Partition(nativeStep.SourceRange()) + + var inBefore, oBrack, keyTokens, cBrack inputTokens + inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack) + children.AppendUnstructuredTokens(inBefore.Tokens()) + children.AppendUnstructuredTokens(oBrack.Tokens()) + keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack) + + keyVal := tNativeStep.Key + switch keyVal.Type() { + case cty.String: + key := newQuoted(keyTokens.Tokens()) + step.key = children.Append(key) + case cty.Number: + valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit) + children.AppendUnstructuredTokens(valBefore.Tokens()) + key := newNumber(valToken) + step.key = children.Append(key) + children.AppendUnstructuredTokens(valAfter.Tokens()) + } + + children.AppendUnstructuredTokens(cBrack.Tokens()) + children.AppendUnstructuredTokens(from.Tokens()) + + return before, newNode(step), after + default: + panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep)) + } + +} + +// writerTokens takes a sequence of tokens as produced by the main hclsyntax +// package and transforms it into an equivalent sequence of tokens using +// this package's own token model. +// +// The resulting list contains the same number of tokens and uses the same +// indices as the input, allowing the two sets of tokens to be correlated +// by index. +func writerTokens(nativeTokens hclsyntax.Tokens) Tokens { + // Ultimately we want a slice of token _pointers_, but since we can + // predict how much memory we're going to devote to tokens we'll allocate + // it all as a single flat buffer and thus give the GC less work to do. + tokBuf := make([]Token, len(nativeTokens)) + var lastByteOffset int + for i, mainToken := range nativeTokens { + // Create a copy of the bytes so that we can mutate without + // corrupting the original token stream. 
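+		// (The hclsyntax scanner's token bytes alias the original source
+		// buffer, so they must not be modified directly.)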
+		bytes := make([]byte, len(mainToken.Bytes))
+		copy(bytes, mainToken.Bytes)
+
+		tokBuf[i] = Token{
+			Type:  mainToken.Type,
+			Bytes: bytes,
+
+			// We assume here that spaces are always ASCII spaces, since
+			// that's what the scanner also assumes, and thus the number
+			// of bytes skipped is also the number of space characters.
+			SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset,
+		}
+
+		lastByteOffset = mainToken.Range.End.Byte
+	}
+
+	// Now make a slice of pointers into the previous slice.
+	ret := make(Tokens, len(tokBuf))
+	for i := range ret {
+		ret[i] = &tokBuf[i]
+	}
+
+	return ret
+}
+
+// partitionTokens takes a sequence of tokens and a hcl.Range and returns
+// two indices within the token sequence that correspond with the range
+// boundaries, such that the slice operator could be used to produce
+// three token sequences for before, within, and after respectively:
+//
+//     start, end := partitionTokens(toks, rng)
+//     before := toks[:start]
+//     within := toks[start:end]
+//     after := toks[end:]
+//
+// This works best when the range is aligned with token boundaries (e.g.
+// because it was produced in terms of the scanner's result) but if that isn't
+// true then it will make a best effort that may produce strange results at
+// the boundaries.
+//
+// Native hclsyntax tokens are used here, because they contain the necessary
+// absolute position information. However, since writerTokens produces a
+// correlatable sequence of writer tokens, the resulting indices can be
+// used also to index into its result, allowing the partitioning of writer
+// tokens to be driven by the partitioning of native tokens.
+//
+// The tokens are assumed to be in source order and non-overlapping, which
+// will be true if the token sequence from the scanner is used directly.
+func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) {
+	// We use a linear search here because we assume that in most cases our
+	// target range is close to the beginning of the sequence, and the
+	// sequences are generally small for most reasonable files anyway.
+	for i := 0; ; i++ {
+		if i >= len(toks) {
+			// No tokens for the given range at all!
+			return len(toks), len(toks)
+		}
+
+		if toks[i].Range.Start.Byte >= rng.Start.Byte {
+			start = i
+			break
+		}
+	}
+
+	for i := start; ; i++ {
+		if i >= len(toks) {
+			// The range "hangs off" the end of the token sequence
+			return start, len(toks)
+		}
+
+		if toks[i].Range.Start.Byte >= rng.End.Byte {
+			end = i // end marker is exclusive
+			break
+		}
+	}
+
+	return start, end
+}
+
+// partitionLeadCommentTokens takes a sequence of tokens that is assumed
+// to immediately precede a construct that can have lead comment tokens,
+// and returns the index into that sequence where the lead comments begin.
+//
+// Lead comments are defined as whole lines containing only comment tokens
+// with no blank lines between. If no such lines are found, the returned
+// index will be len(toks).
+func partitionLeadCommentTokens(toks hclsyntax.Tokens) int {
+	// single-line comments (which is what we're interested in here)
+	// consume their trailing newline, so we can just walk backwards
+	// until we stop seeing comment tokens.
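+	//
+	// For example, given tokens for the lines:
+	//     # a lead comment
+	//     foo = 1
+	// the returned index points at the "# a lead comment" token.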
+	for i := len(toks) - 1; i >= 0; i-- {
+		if toks[i].Type != hclsyntax.TokenComment {
+			return i + 1
+		}
+	}
+	return 0
+}
+
+// partitionLineEndTokens takes a sequence of tokens that is assumed
+// to immediately follow a construct that can have a line comment, and
+// returns first the index where any line comments end and then second
+// the index immediately after the trailing newline.
+//
+// Line comments are defined as comments that appear immediately after
+// a construct on the same line where its significant tokens ended.
+//
+// Since single-line comment tokens (# and //) include the newline that
+// terminates them, in the presence of these the two returned indices
+// will be the same since the comment itself serves as the line end.
+func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) {
+	for i := 0; i < len(toks); i++ {
+		tok := toks[i]
+		if tok.Type != hclsyntax.TokenComment {
+			switch tok.Type {
+			case hclsyntax.TokenNewline:
+				return i, i + 1
+			case hclsyntax.TokenEOF:
+				// Although this is valid, we mustn't include the EOF
+				// itself as our "newline" or else strange things will
+				// happen when we try to append new items.
+				return i, i
+			default:
+				// If we have well-formed input here then nothing else should be
+				// possible. This path should never happen, because we only try
+				// to extract tokens from the sequence if the parser succeeded,
+				// and it should catch this problem itself.
+				panic("malformed line trailers: expected only comments and newlines")
+			}
+		}
+
+		if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
+			// Newline at the end of a single-line comment serves both as
+			// the end of comments *and* the end of the line.
+			return i + 1, i + 1
+		}
+	}
+	return len(toks), len(toks)
+}
+
+// lexConfig uses the hclsyntax scanner to get a token stream and then
+// rewrites it into this package's token model.
+//
+// Any errors produced during scanning are ignored, so the results of this
+// function should be used with care.
+func lexConfig(src []byte) Tokens {
+	mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1})
+	return writerTokens(mainTokens)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/public.go b/vendor/github.com/hashicorp/hcl2/hclwrite/public.go
new file mode 100644
index 0000000000..4d5ce2a6ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/public.go
@@ -0,0 +1,44 @@
+package hclwrite
+
+import (
+	"bytes"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// NewFile creates a new file object that is empty and ready to have constructs
+// added to it.
+func NewFile() *File {
+	body := &Body{
+		inTree: newInTree(),
+		items:  newNodeSet(),
+	}
+	file := &File{
+		inTree: newInTree(),
+	}
+	file.body = file.inTree.children.Append(body)
+	return file
+}
+
+// ParseConfig interprets the given source bytes into a *hclwrite.File. The
+// resulting AST can be used to perform surgical edits on the source code
+// before turning it back into bytes again.
+func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
+	return parse(src, filename, start)
+}
+
+// Format takes source code and performs simple whitespace changes to transform
+// it to a canonical layout style.
+//
+// Format skips constructing an AST and works directly with tokens, so it
+// is less expensive than formatting via the AST for situations where no other
+// changes will be made.
It also ignores syntax errors and can thus be applied +// to partial source code, although the result in that case may not be +// desirable. +func Format(src []byte) []byte { + tokens := lexConfig(src) + format(tokens) + buf := &bytes.Buffer{} + tokens.WriteTo(buf) + return buf.Bytes() +} diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go b/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go new file mode 100644 index 0000000000..d87f81853b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go @@ -0,0 +1,122 @@ +package hclwrite + +import ( + "bytes" + "io" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +// Token is a single sequence of bytes annotated with a type. It is similar +// in purpose to hclsyntax.Token, but discards the source position information +// since that is not useful in code generation. +type Token struct { + Type hclsyntax.TokenType + Bytes []byte + + // We record the number of spaces before each token so that we can + // reproduce the exact layout of the original file when we're making + // surgical changes in-place. When _new_ code is created it will always + // be in the canonical style, but we preserve layout of existing code. + SpacesBefore int +} + +// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token. +// A complete token is not possible since we don't have source location +// information here, and so this method is unexported so we can be sure it will +// only be used for internal purposes where we know the range isn't important. +// +// This is primarily intended to allow us to re-use certain functionality from +// hclsyntax rather than re-implementing it against our own token type here. +func (t *Token) asHCLSyntax() hclsyntax.Token { + return hclsyntax.Token{ + Type: t.Type, + Bytes: t.Bytes, + Range: hcl.Range{ + Filename: "", + }, + } +} + +// Tokens is a flat list of tokens. +type Tokens []*Token + +func (ts Tokens) Bytes() []byte { + buf := &bytes.Buffer{} + ts.WriteTo(buf) + return buf.Bytes() +} + +func (ts Tokens) testValue() string { + return string(ts.Bytes()) +} + +// Columns returns the number of columns (grapheme clusters) the token sequence +// occupies. The result is not meaningful if there are newline or single-line +// comment tokens in the sequence. +func (ts Tokens) Columns() int { + ret := 0 + for _, token := range ts { + ret += token.SpacesBefore // spaces are always worth one column each + ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters) + ret += ct + } + return ret +} + +// WriteTo takes an io.Writer and writes the bytes for each token to it, +// along with the spacing that separates each token. In other words, this +// allows serializing the tokens to a file or other such byte stream. +func (ts Tokens) WriteTo(wr io.Writer) (int64, error) { + // We know we're going to be writing a lot of small chunks of repeated + // space characters, so we'll prepare a buffer of these that we can + // easily pass to wr.Write without any further allocation. 
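+	// 40 columns of spaces covers the common case; longer runs of spaces
+	// are written in several chunks by the inner loop below.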
+ spaces := make([]byte, 40) + for i := range spaces { + spaces[i] = ' ' + } + + var n int64 + var err error + for _, token := range ts { + if err != nil { + return n, err + } + + for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) { + thisChunk := spacesBefore + if thisChunk > len(spaces) { + thisChunk = len(spaces) + } + var thisN int + thisN, err = wr.Write(spaces[:thisChunk]) + n += int64(thisN) + if err != nil { + return n, err + } + } + + var thisN int + thisN, err = wr.Write(token.Bytes) + n += int64(thisN) + } + + return n, err +} + +func (ts Tokens) walkChildNodes(w internalWalkFunc) { + // Unstructured tokens have no child nodes +} + +func (ts Tokens) BuildTokens(to Tokens) Tokens { + return append(to, ts...) +} + +func newIdentToken(name string) *Token { + return &Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(name), + } +} diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go index f2024d01c2..184e029b05 100644 --- a/vendor/github.com/hashicorp/hil/convert.go +++ b/vendor/github.com/hashicorp/hil/convert.go @@ -47,8 +47,23 @@ func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error { } func InterfaceToVariable(input interface{}) (ast.Variable, error) { - if inputVariable, ok := input.(ast.Variable); ok { - return inputVariable, nil + if iv, ok := input.(ast.Variable); ok { + return iv, nil + } + + // This is just to maintain backward compatibility + // after https://github.com/mitchellh/mapstructure/pull/98 + if v, ok := input.([]ast.Variable); ok { + return ast.Variable{ + Type: ast.TypeList, + Value: v, + }, nil + } + if v, ok := input.(map[string]ast.Variable); ok { + return ast.Variable{ + Type: ast.TypeMap, + Value: v, + }, nil } var stringVal string diff --git a/vendor/github.com/hashicorp/hil/go.mod b/vendor/github.com/hashicorp/hil/go.mod new file mode 100644 index 0000000000..45719a69b7 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/go.mod @@ -0,0 +1,6 @@ +module github.com/hashicorp/hil + +require ( + github.com/mitchellh/mapstructure v1.1.2 + github.com/mitchellh/reflectwalk v1.0.0 +) diff --git a/vendor/github.com/hashicorp/hil/go.sum b/vendor/github.com/hashicorp/hil/go.sum new file mode 100644 index 0000000000..83639b6919 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/go.sum @@ -0,0 +1,4 @@ +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go index bab86c67a6..86085de018 100644 --- a/vendor/github.com/hashicorp/hil/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hil/scanner/scanner.go @@ -395,6 +395,12 @@ func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) { pos.Column = pos.Column + 2 litLen = litLen + 2 continue + } else if follow == '\\' { + // \\ escapes \ + // so we will consume both characters here. 
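+				// Advancing position and length by two keeps the scanner
+				// in sync with the two-byte escape sequence just consumed.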
+ pos.Column = pos.Column + 2 + litLen = litLen + 2 + continue } } } diff --git a/vendor/github.com/hashicorp/logutils/go.mod b/vendor/github.com/hashicorp/logutils/go.mod new file mode 100644 index 0000000000..ba38a45764 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/logutils diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE new file mode 100644 index 0000000000..82b4de97c7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go new file mode 100644 index 0000000000..8d04ad4de2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go @@ -0,0 +1,138 @@ +package tfconfig + +import ( + "fmt" + + legacyhclparser "github.com/hashicorp/hcl/hcl/parser" + "github.com/hashicorp/hcl2/hcl" +) + +// Diagnostic describes a problem (error or warning) encountered during +// configuration loading. +type Diagnostic struct { + Severity DiagSeverity `json:"severity"` + Summary string `json:"summary"` + Detail string `json:"detail,omitempty"` + + // Pos is not populated for all diagnostics, but when populated should + // indicate a particular line that the described problem relates to. + Pos *SourcePos `json:"pos,omitempty"` +} + +// Diagnostics represents a sequence of diagnostics. This is the type that +// should be returned from a function that might generate diagnostics. 
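+//
+// Illustrative usage from a caller's perspective (assuming a module
+// directory "./mod"):
+//
+//     mod, diags := tfconfig.LoadModule("./mod")
+//     if err := diags.Err(); err != nil {
+//         // handle the error; the returned module is still a
+//         // best-effort partial result
+//     }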
+type Diagnostics []Diagnostic
+
+// HasErrors returns true if there is at least one Diagnostic of severity
+// DiagError in the receiver.
+//
+// If a function returns a Diagnostics without errors then the result can
+// be assumed to be complete within the "best effort" constraints of this
+// library. If errors are present then the caller may wish to employ more
+// caution in relying on the result.
+func (diags Diagnostics) HasErrors() bool {
+	for _, diag := range diags {
+		if diag.Severity == DiagError {
+			return true
+		}
+	}
+	return false
+}
+
+func (diags Diagnostics) Error() string {
+	switch len(diags) {
+	case 0:
+		return "no problems"
+	case 1:
+		return fmt.Sprintf("%s: %s", diags[0].Summary, diags[0].Detail)
+	default:
+		return fmt.Sprintf("%s: %s (and %d other messages)", diags[0].Summary, diags[0].Detail, len(diags)-1)
+	}
+}
+
+// Err returns an error representing the receiver if the receiver HasErrors, or
+// nil otherwise.
+//
+// The returned error can be type-asserted back to a Diagnostics if needed.
+func (diags Diagnostics) Err() error {
+	if diags.HasErrors() {
+		return diags
+	}
+	return nil
+}
+
+// DiagSeverity describes the severity of a Diagnostic.
+type DiagSeverity rune
+
+// DiagError indicates a problem that prevented proper processing of the
+// configuration. In the presence of DiagError diagnostics the result is
+// likely to be incomplete.
+const DiagError DiagSeverity = 'E'
+
+// DiagWarning indicates a problem that the user may wish to consider but
+// that did not prevent proper processing of the configuration.
+const DiagWarning DiagSeverity = 'W'
+
+// MarshalJSON is an implementation of encoding/json.Marshaler.
+func (s DiagSeverity) MarshalJSON() ([]byte, error) {
+	switch s {
+	case DiagError:
+		return []byte(`"error"`), nil
+	case DiagWarning:
+		return []byte(`"warning"`), nil
+	default:
+		return []byte(`"invalid"`), nil
+	}
+}
+
+func diagnosticsHCL(diags hcl.Diagnostics) Diagnostics {
+	if len(diags) == 0 {
+		return nil
+	}
+	ret := make(Diagnostics, len(diags))
+	for i, diag := range diags {
+		ret[i] = Diagnostic{
+			Summary: diag.Summary,
+			Detail:  diag.Detail,
+		}
+		switch diag.Severity {
+		case hcl.DiagError:
+			ret[i].Severity = DiagError
+		case hcl.DiagWarning:
+			ret[i].Severity = DiagWarning
+		}
+		if diag.Subject != nil {
+			pos := sourcePosHCL(*diag.Subject)
+			ret[i].Pos = &pos
+		}
+	}
+	return ret
+}
+
+func diagnosticsError(err error) Diagnostics {
+	if err == nil {
+		return nil
+	}
+
+	if posErr, ok := err.(*legacyhclparser.PosError); ok {
+		pos := sourcePosLegacyHCL(posErr.Pos, "")
+		return Diagnostics{
+			Diagnostic{
+				Severity: DiagError,
+				Summary:  posErr.Err.Error(),
+				Pos:      &pos,
+			},
+		}
+	}
+
+	return Diagnostics{
+		Diagnostic{
+			Severity: DiagError,
+			Summary:  err.Error(),
+		},
+	}
+}
+
+func diagnosticsErrorf(format string, args ...interface{}) Diagnostics {
+	return diagnosticsError(fmt.Errorf(format, args...))
+}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
new file mode 100644
index 0000000000..1604a6e08a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
@@ -0,0 +1,21 @@
+// Package tfconfig is a helper library that does careful, shallow parsing of
+// Terraform modules to provide access to high-level metadata while
+// remaining broadly compatible with configurations targeting various
+// different Terraform versions.
+//
+// This package focuses on describing top-level objects only, and in particular
+// does not attempt any sort of processing that would require access to plugins.
+// Currently it allows callers to extract high-level information about
+// variables, outputs, resource blocks, provider dependencies, and Terraform
+// Core dependencies.
+//
+// This package only works at the level of single modules. A full configuration
+// is a tree of potentially several modules, some of which may be references
+// to remote packages. There are some basic helpers for traversing calls to
+// modules at relative local paths, however.
+//
+// This package employs a "best effort" parsing strategy, producing as complete
+// a result as possible even though the input may not be entirely valid. The
+// intended use-case is high-level analysis and indexing of externally-facing
+// module characteristics, as opposed to validating or even applying the module.
+package tfconfig
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
new file mode 100644
index 0000000000..2d13fe1245
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
@@ -0,0 +1,130 @@
+package tfconfig
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// LoadModule reads the directory at the given path and attempts to interpret
+// it as a Terraform module.
+func LoadModule(dir string) (*Module, Diagnostics) {
+
+	// For broad compatibility here we actually have two separate loader
+	// codepaths. The main one uses the new HCL parser and API and is intended
+	// for configurations from Terraform 0.12 onwards (though it will work for
+	// many older configurations too), but we'll also fall back on one that
+	// uses the _old_ HCL implementation so we can deal with some edge-cases
+	// that are not valid in new HCL.
+
+	module, diags := loadModule(dir)
+	if diags.HasErrors() {
+		// Try using the legacy HCL parser and see if we fare better.
+		legacyModule, legacyDiags := loadModuleLegacyHCL(dir)
+		if !legacyDiags.HasErrors() {
+			legacyModule.init(legacyDiags)
+			return legacyModule, legacyDiags
+		}
+	}
+
+	module.init(diags)
+	return module, diags
+}
+
+// IsModuleDir checks if the given path contains Terraform configuration files.
+// This allows the caller to decide how to handle directories that do not have .tf files.
+func IsModuleDir(dir string) bool {
+	primaryPaths, _ := dirFiles(dir)
+	if len(primaryPaths) == 0 {
+		return false
+	}
+	return true
+}
+
+func (m *Module) init(diags Diagnostics) {
+	// Fill in any additional provider requirements that are implied by
+	// resource configurations, to save the caller from needing to apply
+	// this logic itself. Implied requirements don't have version constraints,
+	// but we'll make sure the requirement value is still non-nil in this
+	// case so callers can easily recognize it.
+	for _, r := range m.ManagedResources {
+		if _, exists := m.RequiredProviders[r.Provider.Name]; !exists {
+			m.RequiredProviders[r.Provider.Name] = []string{}
+		}
+	}
+	for _, r := range m.DataResources {
+		if _, exists := m.RequiredProviders[r.Provider.Name]; !exists {
+			m.RequiredProviders[r.Provider.Name] = []string{}
+		}
+	}
+
+	// We redundantly also reference the diagnostics from inside the module
+	// object, primarily so that they can easily be included in JSON-serialized
+	// versions of the module object.
+	m.Diagnostics = diags
+}
+
+func dirFiles(dir string) (primary []string, diags hcl.Diagnostics) {
+	infos, err := ioutil.ReadDir(dir)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Failed to read module directory",
+			Detail:   fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir),
+		})
+		return
+	}
+
+	var override []string
+	for _, info := range infos {
+		if info.IsDir() {
+			// We only care about files
+			continue
+		}
+
+		name := info.Name()
+		ext := fileExt(name)
+		if ext == "" || isIgnoredFile(name) {
+			continue
+		}
+
+		baseName := name[:len(name)-len(ext)] // strip extension
+		isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override")
+
+		fullPath := filepath.Join(dir, name)
+		if isOverride {
+			override = append(override, fullPath)
+		} else {
+			primary = append(primary, fullPath)
+		}
+	}
+
+	// We assume that any _override files are logically named, and we process
+	// the files in alphabetical order: primaries first, then overrides.
+	primary = append(primary, override...)
+
+	return
+}
+
+// fileExt returns the Terraform configuration extension of the given
+// path, or a blank string if it is not a recognized extension.
+func fileExt(path string) string {
+	if strings.HasSuffix(path, ".tf") {
+		return ".tf"
+	} else if strings.HasSuffix(path, ".tf.json") {
+		return ".tf.json"
+	} else {
+		return ""
+	}
+}
+
+// isIgnoredFile returns true if the given filename (which must not have a
+// directory path ahead of it) should be ignored as e.g. an editor swap file.
+func isIgnoredFile(name string) bool {
+	return strings.HasPrefix(name, ".") || // Unix-like hidden files
+		strings.HasSuffix(name, "~") || // vim
+		strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
+}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go
new file mode 100644
index 0000000000..72b5d4af90
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go
@@ -0,0 +1,322 @@
+package tfconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+
+	"github.com/hashicorp/hcl2/gohcl"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hclparse"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+)
+
+func loadModule(dir string) (*Module, Diagnostics) {
+	mod := newModule(dir)
+	primaryPaths, diags := dirFiles(dir)
+
+	parser := hclparse.NewParser()
+
+	for _, filename := range primaryPaths {
+		var file *hcl.File
+		var fileDiags hcl.Diagnostics
+		if strings.HasSuffix(filename, ".json") {
+			file, fileDiags = parser.ParseJSONFile(filename)
+		} else {
+			file, fileDiags = parser.ParseHCLFile(filename)
+		}
+		diags = append(diags, fileDiags...)
+		if file == nil {
+			continue
+		}
+
+		content, _, contentDiags := file.Body.PartialContent(rootSchema)
+		diags = append(diags, contentDiags...)
+
+		for _, block := range content.Blocks {
+			switch block.Type {
+
+			case "terraform":
+				content, _, contentDiags := block.Body.PartialContent(terraformBlockSchema)
+				diags = append(diags, contentDiags...)
+
+				if attr, defined := content.Attributes["required_version"]; defined {
+					var version string
+					valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version)
+					diags = append(diags, valDiags...)
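+					// Only record the constraint when decoding succeeded;
+					// failures are already captured in diags above.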
+					if !valDiags.HasErrors() {
+						mod.RequiredCore = append(mod.RequiredCore, version)
+					}
+				}
+
+				for _, block := range content.Blocks {
+					// Our schema only allows required_providers here, so we
+					// assume that we'll only get that block type.
+					attrs, attrDiags := block.Body.JustAttributes()
+					diags = append(diags, attrDiags...)
+
+					for name, attr := range attrs {
+						var version string
+						valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version)
+						diags = append(diags, valDiags...)
+						if !valDiags.HasErrors() {
+							mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version)
+						}
+					}
+				}
+
+			case "variable":
+				content, _, contentDiags := block.Body.PartialContent(variableSchema)
+				diags = append(diags, contentDiags...)
+
+				name := block.Labels[0]
+				v := &Variable{
+					Name: name,
+					Pos:  sourcePosHCL(block.DefRange),
+				}
+
+				mod.Variables[name] = v
+
+				if attr, defined := content.Attributes["type"]; defined {
+					// We handle this particular attribute in a somewhat-tricky way:
+					// since Terraform may evolve its type expression syntax in
+					// future versions, we don't want to be overly-strict in how
+					// we handle it here, and so we'll instead just take the raw
+					// source provided by the user, using the source location
+					// information in the expression object.
+					//
+					// However, older versions of Terraform expected the type
+					// to be a string containing a keyword, so we'll need to
+					// handle that as a special case first for backward compatibility.
+
+					var typeExpr string
+
+					var typeExprAsStr string
+					valDiags := gohcl.DecodeExpression(attr.Expr, nil, &typeExprAsStr)
+					if !valDiags.HasErrors() {
+						typeExpr = typeExprAsStr
+					} else {
+
+						rng := attr.Expr.Range()
+						sourceFilename := rng.Filename
+						source, exists := parser.Sources()[sourceFilename]
+						if exists {
+							typeExpr = string(rng.SliceBytes(source))
+						} else {
+							// This should never happen, so we'll report an error and leave the type unspecified.
+							diags = append(diags, &hcl.Diagnostic{
+								Severity: hcl.DiagError,
+								Summary:  "Source code not available",
+								Detail:   fmt.Sprintf("Source code is not available for the file %q, which declares the variable %q.", sourceFilename, name),
+								Subject:  &block.DefRange,
+							})
+							typeExpr = ""
+						}
+
+					}
+
+					v.Type = typeExpr
+				}
+
+				if attr, defined := content.Attributes["description"]; defined {
+					var description string
+					valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description)
+					diags = append(diags, valDiags...)
+					v.Description = description
+				}
+
+				if attr, defined := content.Attributes["default"]; defined {
+					// To avoid the caller needing to deal with cty here, we'll
+					// use its JSON encoding to convert into an
+					// approximately-equivalent plain Go interface{} value
+					// to return.
+					val, valDiags := attr.Expr.Value(nil)
+					diags = append(diags, valDiags...)
+					if val.IsWhollyKnown() { // should only be false if there are errors in the input
+						valJSON, err := ctyjson.Marshal(val, val.Type())
+						if err != nil {
+							// Should never happen, since all possible known
+							// values have a JSON mapping.
+							panic(fmt.Errorf("failed to serialize default value as JSON: %s", err))
+						}
+						var def interface{}
+						err = json.Unmarshal(valJSON, &def)
+						if err != nil {
+							// Again should never happen, because valJSON is
+							// guaranteed valid by ctyjson.Marshal.
+							panic(fmt.Errorf("failed to re-parse default value from JSON: %s", err))
+						}
+						v.Default = def
+					}
+				}
+
+			case "output":
+
+				content, _, contentDiags := block.Body.PartialContent(outputSchema)
+				diags = append(diags, contentDiags...)
+ + name := block.Labels[0] + o := &Output{ + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + mod.Outputs[name] = o + + if attr, defined := content.Attributes["description"]; defined { + var description string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) + diags = append(diags, valDiags...) + o.Description = description + } + + case "provider": + + content, _, contentDiags := block.Body.PartialContent(providerConfigSchema) + diags = append(diags, contentDiags...) + + name := block.Labels[0] + + if attr, defined := content.Attributes["version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + if !valDiags.HasErrors() { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version) + } + } + + // Even if there wasn't an explicit version required, we still + // need an entry in our map to signal the unversioned dependency. + if _, exists := mod.RequiredProviders[name]; !exists { + mod.RequiredProviders[name] = []string{} + } + + case "resource", "data": + + content, _, contentDiags := block.Body.PartialContent(resourceSchema) + diags = append(diags, contentDiags...) + + typeName := block.Labels[0] + name := block.Labels[1] + + r := &Resource{ + Type: typeName, + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + var resourcesMap map[string]*Resource + + switch block.Type { + case "resource": + r.Mode = ManagedResourceMode + resourcesMap = mod.ManagedResources + case "data": + r.Mode = DataResourceMode + resourcesMap = mod.DataResources + } + + key := r.MapKey() + + resourcesMap[key] = r + + if attr, defined := content.Attributes["provider"]; defined { + // New style here is to provide this as a naked traversal + // expression, but we also support quoted references for + // older configurations that predated this convention. + traversal, travDiags := hcl.AbsTraversalForExpr(attr.Expr) + if travDiags.HasErrors() { + traversal = nil // in case we got any partial results + + // Fall back on trying to parse as a string + var travStr string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &travStr) + if !valDiags.HasErrors() { + var strDiags hcl.Diagnostics + traversal, strDiags = hclsyntax.ParseTraversalAbs([]byte(travStr), "", hcl.Pos{}) + if strDiags.HasErrors() { + traversal = nil + } + } + } + + // If we get out here with a nil traversal then we didn't + // succeed in processing the input. + if len(traversal) > 0 { + providerName := traversal.RootName() + alias := "" + if len(traversal) > 1 { + if getAttr, ok := traversal[1].(hcl.TraverseAttr); ok { + alias = getAttr.Name + } + } + r.Provider = ProviderRef{ + Name: providerName, + Alias: alias, + } + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider reference", + Detail: "Provider argument requires a provider name followed by an optional alias, like \"aws.foo\".", + Subject: attr.Expr.Range().Ptr(), + }) + } + } else { + // If provider _isn't_ set then we'll infer it from the + // resource type. + r.Provider = ProviderRef{ + Name: resourceTypeDefaultProviderName(r.Type), + } + } + + case "module": + + content, _, contentDiags := block.Body.PartialContent(moduleCallSchema) + diags = append(diags, contentDiags...) 
+ + name := block.Labels[0] + mc := &ModuleCall{ + Name: block.Labels[0], + Pos: sourcePosHCL(block.DefRange), + } + + // check if this is overriding an existing module + var origSource string + if origMod, exists := mod.ModuleCalls[name]; exists { + origSource = origMod.Source + } + + mod.ModuleCalls[name] = mc + + if attr, defined := content.Attributes["source"]; defined { + var source string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &source) + diags = append(diags, valDiags...) + mc.Source = source + } + + if mc.Source == "" { + mc.Source = origSource + } + + if attr, defined := content.Attributes["version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + mc.Version = version + } + + default: + // Should never happen because our cases above should be + // exhaustive for our schema. + panic(fmt.Errorf("unhandled block type %q", block.Type)) + } + } + } + + return mod, diagnosticsHCL(diags) +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go new file mode 100644 index 0000000000..86ffdf11dd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go @@ -0,0 +1,325 @@ +package tfconfig + +import ( + "io/ioutil" + "strings" + + legacyhcl "github.com/hashicorp/hcl" + legacyast "github.com/hashicorp/hcl/hcl/ast" +) + +func loadModuleLegacyHCL(dir string) (*Module, Diagnostics) { + // This implementation is intentionally more quick-and-dirty than the + // main loader. In particular, it doesn't bother to keep careful track + // of multiple error messages because we always fall back on returning + // the main parser's error message if our fallback parsing produces + // an error, and thus the errors here are not seen by the end-caller. 
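+	// (The "legacy" parser here is HCL 1, github.com/hashicorp/hcl, as
+	// opposed to the hcl2 packages used by the main loader.)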
+ mod := newModule(dir) + + primaryPaths, diags := dirFiles(dir) + if diags.HasErrors() { + return mod, diagnosticsHCL(diags) + } + + for _, filename := range primaryPaths { + src, err := ioutil.ReadFile(filename) + if err != nil { + return mod, diagnosticsErrorf("Error reading %s: %s", filename, err) + } + + hclRoot, err := legacyhcl.Parse(string(src)) + if err != nil { + return mod, diagnosticsErrorf("Error parsing %s: %s", filename, err) + } + + list, ok := hclRoot.Node.(*legacyast.ObjectList) + if !ok { + return mod, diagnosticsErrorf("Error parsing %s: no root object", filename) + } + + for _, item := range list.Filter("terraform").Items { + if len(item.Keys) > 0 { + item = &legacyast.ObjectItem{ + Val: &legacyast.ObjectType{ + List: &legacyast.ObjectList{ + Items: []*legacyast.ObjectItem{item}, + }, + }, + } + } + + type TerraformBlock struct { + RequiredVersion string `hcl:"required_version"` + } + var block TerraformBlock + err = legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("terraform block: %s", err) + } + + if block.RequiredVersion != "" { + mod.RequiredCore = append(mod.RequiredCore, block.RequiredVersion) + } + } + + if vars := list.Filter("variable"); len(vars.Items) > 0 { + vars = vars.Children() + type VariableBlock struct { + Type string `hcl:"type"` + Default interface{} + Description string + Fields []string `hcl:",decodedFields"` + } + + for _, item := range vars.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("variable block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block VariableBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid variable block at %s: %s", item.Pos(), err) + } + + // Clean up legacy HCL decoding ambiguity by unwrapping list of maps + if ms, ok := block.Default.([]map[string]interface{}); ok { + def := make(map[string]interface{}) + for _, m := range ms { + for k, v := range m { + def[k] = v + } + } + block.Default = def + } + + v := &Variable{ + Name: name, + Type: block.Type, + Description: block.Description, + Default: block.Default, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + if _, exists := mod.Variables[name]; exists { + return nil, diagnosticsErrorf("duplicate variable block for %q", name) + } + mod.Variables[name] = v + + } + } + + if outputs := list.Filter("output"); len(outputs.Items) > 0 { + outputs = outputs.Children() + type OutputBlock struct { + Description string + } + + for _, item := range outputs.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("output block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block OutputBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid output block at %s: %s", item.Pos(), err) + } + + o := &Output{ + Name: name, + Description: block.Description, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + if _, exists := mod.Outputs[name]; exists { + return nil, diagnosticsErrorf("duplicate output block for %q", name) + } + mod.Outputs[name] = o + } + } + + for _, blockType := range []string{"resource", "data"} { + if resources := list.Filter(blockType); len(resources.Items) > 0 { + resources = resources.Children() + type ResourceBlock struct { + Provider string + } + + for _, item := range resources.Items 
{ + unwrapLegacyHCLObjectKeysFromJSON(item, 2) + + if len(item.Keys) != 2 { + return nil, diagnosticsErrorf("resource block at %s has wrong label count", item.Pos()) + } + + typeName := item.Keys[0].Token.Value().(string) + name := item.Keys[1].Token.Value().(string) + var mode ResourceMode + var rMap map[string]*Resource + switch blockType { + case "resource": + mode = ManagedResourceMode + rMap = mod.ManagedResources + case "data": + mode = DataResourceMode + rMap = mod.DataResources + } + + var block ResourceBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid resource block at %s: %s", item.Pos(), err) + } + + var providerName, providerAlias string + if dotPos := strings.IndexByte(block.Provider, '.'); dotPos != -1 { + providerName = block.Provider[:dotPos] + providerAlias = block.Provider[dotPos+1:] + } else { + providerName = block.Provider + } + if providerName == "" { + providerName = resourceTypeDefaultProviderName(typeName) + } + + r := &Resource{ + Mode: mode, + Type: typeName, + Name: name, + Provider: ProviderRef{ + Name: providerName, + Alias: providerAlias, + }, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + key := r.MapKey() + if _, exists := rMap[key]; exists { + return nil, diagnosticsErrorf("duplicate resource block for %q", key) + } + rMap[key] = r + } + } + + } + + if moduleCalls := list.Filter("module"); len(moduleCalls.Items) > 0 { + moduleCalls = moduleCalls.Children() + type ModuleBlock struct { + Source string + Version string + } + + for _, item := range moduleCalls.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("module block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block ModuleBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("module block at %s: %s", item.Pos(), err) + } + + mc := &ModuleCall{ + Name: name, + Source: block.Source, + Version: block.Version, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + // it's possible this module call is from an override file + if origMod, exists := mod.ModuleCalls[name]; exists { + if mc.Source == "" { + mc.Source = origMod.Source + } + } + mod.ModuleCalls[name] = mc + } + } + + if providerConfigs := list.Filter("provider"); len(providerConfigs.Items) > 0 { + providerConfigs = providerConfigs.Children() + type ProviderBlock struct { + Version string + } + + for _, item := range providerConfigs.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("provider block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block ProviderBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid provider block at %s: %s", item.Pos(), err) + } + + if block.Version != "" { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], block.Version) + } + + // Even if there wasn't an explicit version required, we still + // need an entry in our map to signal the unversioned dependency. + if _, exists := mod.RequiredProviders[name]; !exists { + mod.RequiredProviders[name] = []string{} + } + + } + } + } + + return mod, nil +} + +// unwrapLegacyHCLObjectKeysFromJSON cleans up an edge case that can occur when +// parsing JSON as input: if we're parsing JSON then directly nested +// items will show up as additional "keys". 
+// +// For objects that expect a fixed number of keys, this breaks the +// decoding process. This function unwraps the object into what it would've +// looked like if it came directly from HCL by specifying the number of keys +// you expect. +// +// Example: +// +// { "foo": { "baz": {} } } +// +// Will show up with Keys being: []string{"foo", "baz"} +// when we really just want the first two. This function will fix this. +func unwrapLegacyHCLObjectKeysFromJSON(item *legacyast.ObjectItem, depth int) { + if len(item.Keys) > depth && item.Keys[0].Token.JSON { + for len(item.Keys) > depth { + // Pop off the last key + n := len(item.Keys) + key := item.Keys[n-1] + item.Keys[n-1] = nil + item.Keys = item.Keys[:n-1] + + // Wrap our value in a list + item.Val = &legacyast.ObjectType{ + List: &legacyast.ObjectList{ + Items: []*legacyast.ObjectItem{ + &legacyast.ObjectItem{ + Keys: []*legacyast.ObjectKey{key}, + Val: item.Val, + }, + }, + }, + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go new file mode 100644 index 0000000000..65ddb23073 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go @@ -0,0 +1,35 @@ +package tfconfig + +// Module is the top-level type representing a parsed and processed Terraform +// module. +type Module struct { + // Path is the local filesystem directory where the module was loaded from. + Path string `json:"path"` + + Variables map[string]*Variable `json:"variables"` + Outputs map[string]*Output `json:"outputs"` + + RequiredCore []string `json:"required_core,omitempty"` + RequiredProviders map[string][]string `json:"required_providers"` + + ManagedResources map[string]*Resource `json:"managed_resources"` + DataResources map[string]*Resource `json:"data_resources"` + ModuleCalls map[string]*ModuleCall `json:"module_calls"` + + // Diagnostics records any errors and warnings that were detected during + // loading, primarily for inclusion in serialized forms of the module + // since this slice is also returned as a second argument from LoadModule. + Diagnostics Diagnostics `json:"diagnostics,omitempty"` +} + +func newModule(path string) *Module { + return &Module{ + Path: path, + Variables: make(map[string]*Variable), + Outputs: make(map[string]*Output), + RequiredProviders: make(map[string][]string), + ManagedResources: make(map[string]*Resource), + DataResources: make(map[string]*Resource), + ModuleCalls: make(map[string]*ModuleCall), + } +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go new file mode 100644 index 0000000000..5e1e05a726 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go @@ -0,0 +1,11 @@ +package tfconfig + +// ModuleCall represents a "module" block within a module. That is, a +// declaration of a child module from inside its parent. 
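+//
+// For example (illustrative), a configuration block like
+//
+//     module "network" {
+//       source = "./modules/network"
+//     }
+//
+// is represented as a ModuleCall with Name "network" and Source
+// "./modules/network".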
+type ModuleCall struct { + Name string `json:"name"` + Source string `json:"source"` + Version string `json:"version,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go new file mode 100644 index 0000000000..890b25e694 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go @@ -0,0 +1,9 @@ +package tfconfig + +// Output represents a single output from a Terraform module. +type Output struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go new file mode 100644 index 0000000000..d924837785 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go @@ -0,0 +1,9 @@ +package tfconfig + +// ProviderRef is a reference to a provider configuration within a module. +// It represents the contents of a "provider" argument in a resource, or +// a value in the "providers" map for a module call. +type ProviderRef struct { + Name string `json:"name"` + Alias string `json:"alias,omitempty"` // Empty if the default provider configuration is referenced +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go new file mode 100644 index 0000000000..401c8fce97 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go @@ -0,0 +1,64 @@ +package tfconfig + +import ( + "fmt" + "strconv" + "strings" +) + +// Resource represents a single "resource" or "data" block within a module. +type Resource struct { + Mode ResourceMode `json:"mode"` + Type string `json:"type"` + Name string `json:"name"` + + Provider ProviderRef `json:"provider"` + + Pos SourcePos `json:"pos"` +} + +// MapKey returns a string that can be used to uniquely identify the receiver +// in a map[string]*Resource. +func (r *Resource) MapKey() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + // should never happen + return fmt.Sprintf("[invalid_mode!].%s.%s", r.Type, r.Name) + } +} + +// ResourceMode represents the "mode" of a resource, which is used to +// distinguish between managed resources ("resource" blocks in config) and +// data resources ("data" blocks in config). +type ResourceMode rune + +const InvalidResourceMode ResourceMode = 0 +const ManagedResourceMode ResourceMode = 'M' +const DataResourceMode ResourceMode = 'D' + +func (m ResourceMode) String() string { + switch m { + case ManagedResourceMode: + return "managed" + case DataResourceMode: + return "data" + default: + return "" + } +} + +// MarshalJSON implements encoding/json.Marshaler. 
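+// For example, ManagedResourceMode marshals as "managed" and
+// DataResourceMode as "data"; any other value marshals as "".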
+func (m ResourceMode) MarshalJSON() ([]byte, error) { + return []byte(strconv.Quote(m.String())), nil +} + +func resourceTypeDefaultProviderName(typeName string) string { + if underPos := strings.IndexByte(typeName, '_'); underPos != -1 { + return typeName[:underPos] + } + return typeName +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go new file mode 100644 index 0000000000..3af742ff71 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go @@ -0,0 +1,106 @@ +package tfconfig + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +var rootSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + LabelNames: nil, + }, + { + Type: "variable", + LabelNames: []string{"name"}, + }, + { + Type: "output", + LabelNames: []string{"name"}, + }, + { + Type: "provider", + LabelNames: []string{"name"}, + }, + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "data", + LabelNames: []string{"type", "name"}, + }, + { + Type: "module", + LabelNames: []string{"name"}, + }, + }, +} + +var terraformBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "required_providers", + }, + }, +} + +var providerConfigSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "version", + }, + { + Name: "alias", + }, + }, +} + +var variableSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "type", + }, + { + Name: "description", + }, + { + Name: "default", + }, + }, +} + +var outputSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + }, +} + +var moduleCallSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "source", + }, + { + Name: "version", + }, + { + Name: "providers", + }, + }, +} + +var resourceSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "provider", + }, + }, +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go new file mode 100644 index 0000000000..883914eb7b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go @@ -0,0 +1,50 @@ +package tfconfig + +import ( + legacyhcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl2/hcl" +) + +// SourcePos is a pointer to a particular location in a source file. +// +// This type is embedded into other structs to allow callers to locate the +// definition of each described module element. The SourcePos of an element +// is usually the first line of its definition, although the definition can +// be a little "fuzzy" with JSON-based config files. +type SourcePos struct { + Filename string `json:"filename"` + Line int `json:"line"` +} + +func sourcePos(filename string, line int) SourcePos { + return SourcePos{ + Filename: filename, + Line: line, + } +} + +func sourcePosHCL(rng hcl.Range) SourcePos { + // We intentionally throw away the column information here because + // current and legacy HCL both disagree on the definition of a column + // and so a line-only reference is the best granularity we can do + // such that the result is consistent between both parsers. 
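+	//
+	// For example, a range starting at line 12, column 3 is recorded
+	// simply as line 12; SourcePos has no column field.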
+ return SourcePos{ + Filename: rng.Filename, + Line: rng.Start.Line, + } +} + +func sourcePosLegacyHCL(pos legacyhcltoken.Pos, filename string) SourcePos { + useFilename := pos.Filename + // We'll try to use the filename given in legacy HCL position, but + // in practice there's no way to actually get this populated via + // the HCL API so it's usually empty except in some specialized + // situations, such as positions in error objects. + if useFilename == "" { + useFilename = filename + } + return SourcePos{ + Filename: useFilename, + Line: pos.Line, + } +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go new file mode 100644 index 0000000000..0f73fc995a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go @@ -0,0 +1,16 @@ +package tfconfig + +// Variable represents a single variable from a Terraform module. +type Variable struct { + Name string `json:"name"` + Type string `json:"type,omitempty"` + Description string `json:"description,omitempty"` + + // Default is an approximate representation of the default value in + // the native Go type system. The conversion from the value given in + // configuration may be slightly lossy. Only values that can be + // serialized by json.Marshal will be included here. + Default interface{} `json:"default,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/count_attr.go b/vendor/github.com/hashicorp/terraform/addrs/count_attr.go new file mode 100644 index 0000000000..90a5faf0ed --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/count_attr.go @@ -0,0 +1,12 @@ +package addrs + +// CountAttr is the address of an attribute of the "count" object in +// the interpolation scope, like "count.index". +type CountAttr struct { + referenceable + Name string +} + +func (ca CountAttr) String() string { + return "count." + ca.Name +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/doc.go b/vendor/github.com/hashicorp/terraform/addrs/doc.go new file mode 100644 index 0000000000..46093314fe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/doc.go @@ -0,0 +1,17 @@ +// Package addrs contains types that represent "addresses", which are +// references to specific objects within a Terraform configuration or +// state. +// +// All addresses have string representations based on HCL traversal syntax +// which should be used in the user-interface, and also in-memory +// representations that can be used internally. +// +// For object types that exist within Terraform modules a pair of types is +// used. The "local" part of the address is represented by a type, and then +// an absolute path to that object in the context of its module is represented +// by a type of the same name with an "Abs" prefix added, for "absolute". +// +// All types within this package should be treated as immutable, even if this +// is not enforced by the Go compiler. It is always an implementation error +// to modify an address object in-place after it is initially constructed. 
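+//
+// For example (illustrative), a managed resource in the root module renders
+// as "aws_instance.web", while the same resource inside an instance of a
+// child module renders as "module.child.aws_instance.web".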
+package addrs
diff --git a/vendor/github.com/hashicorp/terraform/addrs/input_variable.go b/vendor/github.com/hashicorp/terraform/addrs/input_variable.go
new file mode 100644
index 0000000000..d2c046c111
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/input_variable.go
@@ -0,0 +1,41 @@
+package addrs
+
+import (
+	"fmt"
+)
+
+// InputVariable is the address of an input variable.
+type InputVariable struct {
+	referenceable
+	Name string
+}
+
+func (v InputVariable) String() string {
+	return "var." + v.Name
+}
+
+// AbsInputVariableInstance is the address of an input variable within a
+// particular module instance.
+type AbsInputVariableInstance struct {
+	Module   ModuleInstance
+	Variable InputVariable
+}
+
+// InputVariable returns the absolute address of the input variable of the
+// given name inside the receiving module instance.
+func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance {
+	return AbsInputVariableInstance{
+		Module: m,
+		Variable: InputVariable{
+			Name: name,
+		},
+	}
+}
+
+func (v AbsInputVariableInstance) String() string {
+	if len(v.Module) == 0 {
+		return v.Variable.String()
+	}
+
+	return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String())
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform/addrs/instance_key.go
new file mode 100644
index 0000000000..cef8b27964
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/instance_key.go
@@ -0,0 +1,123 @@
+package addrs
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// InstanceKey represents the key of an instance within an object that
+// contains multiple instances due to using "count" or "for_each" arguments
+// in configuration.
+//
+// IntKey and StringKey are the two implementations of this type. No other
+// implementations are allowed. The single instance of an object that _isn't_
+// using "count" or "for_each" is represented by NoKey, which is a nil
+// InstanceKey.
+type InstanceKey interface {
+	instanceKeySigil()
+	String() string
+}
+
+// ParseInstanceKey returns the instance key corresponding to the given value,
+// which must be known and non-null.
+//
+// If an unknown or null value is provided then this function will panic. This
+// function is intended to deal with the values that would naturally be found
+// in a hcl.TraverseIndex, which (when parsed from source, at least) can never
+// contain unknown or null values.
+func ParseInstanceKey(key cty.Value) (InstanceKey, error) {
+	switch key.Type() {
+	case cty.String:
+		return StringKey(key.AsString()), nil
+	case cty.Number:
+		var idx int
+		err := gocty.FromCtyValue(key, &idx)
+		return IntKey(idx), err
+	default:
+		return NoKey, fmt.Errorf("either a string or an integer is required")
+	}
+}
+
+// NoKey represents the absence of an InstanceKey, for the single instance
+// of a configuration object that does not use "count" or "for_each" at all.
+var NoKey InstanceKey
+
+// IntKey is the InstanceKey representation for integer indices, as
+// used when the "count" argument is specified or if for_each is used with
+// a sequence type.
+type IntKey int
+
+func (k IntKey) instanceKeySigil() {
+}
+
+func (k IntKey) String() string {
+	return fmt.Sprintf("[%d]", int(k))
+}
+
+// StringKey is the InstanceKey representation for string indices, as
+// used when the "for_each" argument is specified with a map or object type.
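+//
+// For example (illustrative), a for_each over a map with key "a" produces
+// instance addresses ending in ["a"], which is the form String renders.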
+type StringKey string + +func (k StringKey) instanceKeySigil() { +} + +func (k StringKey) String() string { + // FIXME: This isn't _quite_ right because Go's quoted string syntax is + // slightly different than HCL's, but we'll accept it for now. + return fmt.Sprintf("[%q]", string(k)) +} + +// InstanceKeyLess returns true if the first given instance key i should sort +// before the second key j, and false otherwise. +func InstanceKeyLess(i, j InstanceKey) bool { + iTy := instanceKeyType(i) + jTy := instanceKeyType(j) + + switch { + case i == j: + return false + case i == NoKey: + return true + case j == NoKey: + return false + case iTy != jTy: + // The ordering here is arbitrary except that we want NoKeyType + // to sort before the others, so we'll just use the enum values + // of InstanceKeyType here (where NoKey is zero, sorting before + // any other). + return uint32(iTy) < uint32(jTy) + case iTy == IntKeyType: + return int(i.(IntKey)) < int(j.(IntKey)) + case iTy == StringKeyType: + return string(i.(StringKey)) < string(j.(StringKey)) + default: + // Shouldn't be possible to get down here in practice, since the + // above is exhaustive. + return false + } +} + +func instanceKeyType(k InstanceKey) InstanceKeyType { + if _, ok := k.(StringKey); ok { + return StringKeyType + } + if _, ok := k.(IntKey); ok { + return IntKeyType + } + return NoKeyType +} + +// InstanceKeyType represents the different types of instance key that are +// supported. Usually it is sufficient to simply type-assert an InstanceKey +// value to either IntKey or StringKey, but this type and its values can be +// used to represent the types themselves, rather than specific values +// of those types. +type InstanceKeyType rune + +const ( + NoKeyType InstanceKeyType = 0 + IntKeyType InstanceKeyType = 'I' + StringKeyType InstanceKeyType = 'S' +) diff --git a/vendor/github.com/hashicorp/terraform/addrs/local_value.go b/vendor/github.com/hashicorp/terraform/addrs/local_value.go new file mode 100644 index 0000000000..61a07b9c75 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/local_value.go @@ -0,0 +1,48 @@ +package addrs + +import ( + "fmt" +) + +// LocalValue is the address of a local value. +type LocalValue struct { + referenceable + Name string +} + +func (v LocalValue) String() string { + return "local." + v.Name +} + +// Absolute converts the receiver into an absolute address within the given +// module instance. +func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue { + return AbsLocalValue{ + Module: m, + LocalValue: v, + } +} + +// AbsLocalValue is the absolute address of a local value within a module instance. +type AbsLocalValue struct { + Module ModuleInstance + LocalValue LocalValue +} + +// LocalValue returns the absolute address of a local value of the given +// name within the receiving module instance. 
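+//
+// A minimal usage sketch (the name "region" is hypothetical):
+//
+//     addr := RootModuleInstance.LocalValue("region")
+//     // addr.String() == "local.region"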
+func (m ModuleInstance) LocalValue(name string) AbsLocalValue {
+	return AbsLocalValue{
+		Module: m,
+		LocalValue: LocalValue{
+			Name: name,
+		},
+	}
+}
+
+func (v AbsLocalValue) String() string {
+	if len(v.Module) == 0 {
+		return v.LocalValue.String()
+	}
+	return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String())
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module.go b/vendor/github.com/hashicorp/terraform/addrs/module.go
new file mode 100644
index 0000000000..6420c63018
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module.go
@@ -0,0 +1,75 @@
+package addrs
+
+import (
+	"strings"
+)
+
+// Module is an address for a module call within configuration. This is
+// the static counterpart of ModuleInstance, representing a traversal through
+// the static module call tree in configuration; it does not take into account
+// the potentially-multiple instances of a module that might be created by
+// "count" and "for_each" arguments within those calls.
+//
+// This type should be used only in very specialized cases when working with
+// the static module call tree. Type ModuleInstance is appropriate in more cases.
+//
+// Although Module is a slice, it should be treated as immutable after creation.
+type Module []string
+
+// RootModule is the module address representing the root of the static module
+// call tree, which is also the zero value of Module.
+//
+// Note that this is not the root of the dynamic module tree, which is instead
+// represented by RootModuleInstance.
+var RootModule Module
+
+// IsRoot returns true if the receiver is the address of the root module,
+// or false otherwise.
+func (m Module) IsRoot() bool {
+	return len(m) == 0
+}
+
+func (m Module) String() string {
+	if len(m) == 0 {
+		return ""
+	}
+	return strings.Join([]string(m), ".")
+}
+
+// Child returns the address of a child call in the receiver, identified by the
+// given name.
+func (m Module) Child(name string) Module {
+	ret := make(Module, 0, len(m)+1)
+	ret = append(ret, m...)
+	return append(ret, name)
+}
+
+// Parent returns the address of the parent module of the receiver, or the
+// receiver itself if there is no parent (if it's the root module address).
+func (m Module) Parent() Module {
+	if len(m) == 0 {
+		return m
+	}
+	return m[:len(m)-1]
+}
+
+// Call returns the module call address that corresponds to the receiving
+// module address, along with the address of the module that contains it.
+//
+// There is no call for the root module, so this method will panic if called
+// on the root module address.
+//
+// In practice, this just turns the last element of the receiver into a
+// ModuleCall and then returns a slice of the receiver that excludes that
+// last part. This is just a convenience for situations where a call address
+// is required, such as when dealing with *Reference and Referenceable values.
+func (m Module) Call() (Module, ModuleCall) {
+	if len(m) == 0 {
+		panic("cannot produce ModuleCall for root module")
+	}
+
+	caller, callName := m[:len(m)-1], m[len(m)-1]
+	return caller, ModuleCall{
+		Name: callName,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_call.go b/vendor/github.com/hashicorp/terraform/addrs/module_call.go
new file mode 100644
index 0000000000..09596cc84a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module_call.go
@@ -0,0 +1,81 @@
+package addrs
+
+import (
+	"fmt"
+)
+
+// ModuleCall is the address of a call from the current module to a child
+// module.
+//
+// There is no "Abs" version of ModuleCall because an absolute module path
+// is represented by ModuleInstance.
+type ModuleCall struct {
+	referenceable
+	Name string
+}
+
+func (c ModuleCall) String() string {
+	return "module." + c.Name
+}
+
+// Instance returns the address of an instance of the receiver identified by
+// the given key.
+func (c ModuleCall) Instance(key InstanceKey) ModuleCallInstance {
+	return ModuleCallInstance{
+		Call: c,
+		Key:  key,
+	}
+}
+
+// ModuleCallInstance is the address of one instance of a module created from
+// a module call, which might create multiple instances using "count" or
+// "for_each" arguments.
+type ModuleCallInstance struct {
+	referenceable
+	Call ModuleCall
+	Key  InstanceKey
+}
+
+func (c ModuleCallInstance) String() string {
+	if c.Key == NoKey {
+		return c.Call.String()
+	}
+	return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key)
+}
+
+// ModuleInstance returns the address of the module instance that corresponds
+// to the receiving call instance when resolved in the given calling module.
+// In other words, it returns the child module instance that the receiving
+// call instance creates.
+func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance {
+	return caller.Child(c.Call.Name, c.Key)
+}
+
+// Output returns the address of an output of the receiver identified by its
+// name.
+func (c ModuleCallInstance) Output(name string) ModuleCallOutput {
+	return ModuleCallOutput{
+		Call: c,
+		Name: name,
+	}
+}
+
+// ModuleCallOutput is the address of a particular named output produced by
+// an instance of a module call.
+type ModuleCallOutput struct {
+	referenceable
+	Call ModuleCallInstance
+	Name string
+}
+
+func (co ModuleCallOutput) String() string {
+	return fmt.Sprintf("%s.%s", co.Call.String(), co.Name)
+}
+
+// AbsOutputValue returns the absolute output value address that corresponds
+// to the receiving module call output address, once resolved in the given
+// calling module.
+func (co ModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue {
+	moduleAddr := co.Call.ModuleInstance(caller)
+	return moduleAddr.OutputValue(co.Name)
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform/addrs/module_instance.go
new file mode 100644
index 0000000000..67e73e52bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module_instance.go
@@ -0,0 +1,415 @@
+package addrs
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/gocty"
+
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// ModuleInstance is an address for a particular module instance within the
+// dynamic module tree. This is an extension of the static traversals
+// represented by type Module that deals with the possibility of a single
+// module call producing multiple instances via the "count" and "for_each"
+// arguments.
+//
+// Although ModuleInstance is a slice, it should be treated as immutable after
+// creation.
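+//
+// As a sketch (the module names and key here are hypothetical), the address
+// module.a[0].module.b corresponds to:
+//
+//     ModuleInstance{
+//         {Name: "a", InstanceKey: IntKey(0)},
+//         {Name: "b"},
+//     }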
+type ModuleInstance []ModuleInstanceStep
+
+var (
+	_ Targetable = ModuleInstance(nil)
+)
+
+func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) {
+	mi, remain, diags := parseModuleInstancePrefix(traversal)
+	if len(remain) != 0 {
+		if len(remain) == len(traversal) {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid module instance address",
+				Detail:   "A module instance address must begin with \"module.\".",
+				Subject:  remain.SourceRange().Ptr(),
+			})
+		} else {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid module instance address",
+				Detail:   "The module instance address is followed by additional invalid content.",
+				Subject:  remain.SourceRange().Ptr(),
+			})
+		}
+	}
+	return mi, diags
+}
+
+// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance
+// that takes a string and parses it with the HCL native syntax traversal parser
+// before interpreting it.
+//
+// This should be used only in specialized situations since it will cause the
+// created references to not have any meaningful source location information.
+// If a reference string is coming from a source that should be identified in
+// error messages then the caller should instead parse it directly using a
+// suitable function from the HCL API and pass the traversal itself to
+// ParseModuleInstance.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// then the returned address is invalid.
+func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return nil, diags
+	}
+
+	addr, addrDiags := ParseModuleInstance(traversal)
+	diags = diags.Append(addrDiags)
+	return addr, diags
+}
+
+func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) {
+	remain := traversal
+	var mi ModuleInstance
+	var diags tfdiags.Diagnostics
+
+	for len(remain) > 0 {
+		var next string
+		switch tt := remain[0].(type) {
+		case hcl.TraverseRoot:
+			next = tt.Name
+		case hcl.TraverseAttr:
+			next = tt.Name
+		default:
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid address operator",
+				Detail:   "Module address prefix must be followed by a dot and then a name.",
+				Subject:  remain[0].SourceRange().Ptr(),
+			})
+			break
+		}
+
+		if next != "module" {
+			break
+		}
+
+		kwRange := remain[0].SourceRange()
+		remain = remain[1:]
+		// If we have the prefix "module" then we should be followed by a
+		// module call name, as an attribute, and then optionally an index step
+		// giving the instance key.
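+		// For example, in the hypothetical address "module.foo[2].module.bar",
+		// each "module" keyword is followed by an attribute step giving the
+		// call name ("foo", "bar"), optionally followed by an index step
+		// such as "[2]".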
+ if len(remain) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Prefix \"module.\" must be followed by a module name.", + Subject: &kwRange, + }) + break + } + + var moduleName string + switch tt := remain[0].(type) { + case hcl.TraverseAttr: + moduleName = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Prefix \"module.\" must be followed by a module name.", + Subject: remain[0].SourceRange().Ptr(), + }) + break + } + remain = remain[1:] + step := ModuleInstanceStep{ + Name: moduleName, + } + + if len(remain) > 0 { + if idx, ok := remain[0].(hcl.TraverseIndex); ok { + remain = remain[1:] + + switch idx.Key.Type() { + case cty.String: + step.InstanceKey = StringKey(idx.Key.AsString()) + case cty.Number: + var idxInt int + err := gocty.FromCtyValue(idx.Key, &idxInt) + if err == nil { + step.InstanceKey = IntKey(idxInt) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: fmt.Sprintf("Invalid module index: %s.", err), + Subject: idx.SourceRange().Ptr(), + }) + } + default: + // Should never happen, because no other types are allowed in traversal indices. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Invalid module key: must be either a string or an integer.", + Subject: idx.SourceRange().Ptr(), + }) + } + } + } + + mi = append(mi, step) + } + + var retRemain hcl.Traversal + if len(remain) > 0 { + retRemain = make(hcl.Traversal, len(remain)) + copy(retRemain, remain) + // The first element here might be either a TraverseRoot or a + // TraverseAttr, depending on whether we had a module address on the + // front. To make life easier for callers, we'll normalize to always + // start with a TraverseRoot. + if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { + retRemain[0] = hcl.TraverseRoot{ + Name: tt.Name, + SrcRange: tt.SrcRange, + } + } + } + + return mi, retRemain, diags +} + +// UnkeyedInstanceShim is a shim method for converting a Module address to the +// equivalent ModuleInstance address that assumes that no modules have +// keyed instances. +// +// This is a temporary allowance for the fact that Terraform does not presently +// support "count" and "for_each" on modules, and thus graph building code that +// derives graph nodes from configuration must just assume unkeyed modules +// in order to construct the graph. At a later time when "count" and "for_each" +// support is added for modules, all callers of this method will need to be +// reworked to allow for keyed module instances. +func (m Module) UnkeyedInstanceShim() ModuleInstance { + path := make(ModuleInstance, len(m)) + for i, name := range m { + path[i] = ModuleInstanceStep{Name: name} + } + return path +} + +// ModuleInstanceStep is a single traversal step through the dynamic module +// tree. It is used only as part of ModuleInstance. +type ModuleInstanceStep struct { + Name string + InstanceKey InstanceKey +} + +// RootModuleInstance is the module instance address representing the root +// module, which is also the zero value of ModuleInstance. +var RootModuleInstance ModuleInstance + +// IsRoot returns true if the receiver is the address of the root module instance, +// or false otherwise. 
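+//
+// For example, RootModuleInstance.IsRoot() returns true, since
+// RootModuleInstance is the zero value of ModuleInstance and has length zero.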
+func (m ModuleInstance) IsRoot() bool {
+	return len(m) == 0
+}
+
+// Child returns the address of a child module instance of the receiver,
+// identified by the given name and key.
+func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance {
+	ret := make(ModuleInstance, 0, len(m)+1)
+	ret = append(ret, m...)
+	return append(ret, ModuleInstanceStep{
+		Name:        name,
+		InstanceKey: key,
+	})
+}
+
+// Parent returns the address of the parent module instance of the receiver, or
+// the receiver itself if there is no parent (if it's the root module address).
+func (m ModuleInstance) Parent() ModuleInstance {
+	if len(m) == 0 {
+		return m
+	}
+	return m[:len(m)-1]
+}
+
+// String returns a string representation of the receiver, in the format used
+// within e.g. user-provided resource addresses.
+//
+// The address of the root module has the empty string as its representation.
+func (m ModuleInstance) String() string {
+	var buf bytes.Buffer
+	sep := ""
+	for _, step := range m {
+		buf.WriteString(sep)
+		buf.WriteString("module.")
+		buf.WriteString(step.Name)
+		if step.InstanceKey != NoKey {
+			buf.WriteString(step.InstanceKey.String())
+		}
+		sep = "."
+	}
+	return buf.String()
+}
+
+// Equal returns true if the receiver and the given other value
+// contain the exact same parts.
+func (m ModuleInstance) Equal(o ModuleInstance) bool {
+	return m.String() == o.String()
+}
+
+// Less returns true if the receiver should sort before the given other value
+// in a sorted list of addresses.
+func (m ModuleInstance) Less(o ModuleInstance) bool {
+	if len(m) != len(o) {
+		// Shorter path sorts first.
+		return len(m) < len(o)
+	}
+
+	for i := range m {
+		mS, oS := m[i], o[i]
+		switch {
+		case mS.Name != oS.Name:
+			return mS.Name < oS.Name
+		case mS.InstanceKey != oS.InstanceKey:
+			return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey)
+		}
+	}
+
+	return false
+}
+
+// Ancestors returns a slice containing the receiver and all of its ancestor
+// module instances, all the way up to (and including) the root module.
+// The result is ordered by depth, with the root module always first.
+//
+// Since the result always includes the root module, a caller may choose to
+// ignore it by slicing the result with [1:].
+func (m ModuleInstance) Ancestors() []ModuleInstance {
+	ret := make([]ModuleInstance, 0, len(m)+1)
+	for i := 0; i <= len(m); i++ {
+		ret = append(ret, m[:i])
+	}
+	return ret
+}
+
+// IsAncestor returns true if the receiver is an ancestor of the given
+// other value.
+func (m ModuleInstance) IsAncestor(o ModuleInstance) bool {
+	// Longer or equal-sized paths mean the receiver cannot
+	// be an ancestor of the given module instance.
+	if len(m) >= len(o) {
+		return false
+	}
+
+	for i, ms := range m {
+		if ms.Name != o[i].Name {
+			return false
+		}
+		if ms.InstanceKey != NoKey && ms.InstanceKey != o[i].InstanceKey {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Call returns the module call address that corresponds to the given module
+// instance, along with the address of the module instance that contains it.
+//
+// There is no call for the root module, so this method will panic if called
+// on the root module address.
+//
+// A single module call can produce potentially many module instances, so the
+// result discards any instance key that might be present on the last step
+// of the instance. To retain this, use CallInstance instead.
+//
+// In practice, this just turns the last element of the receiver into a
+// ModuleCall and then returns a slice of the receiver that excludes that
+// last part. This is just a convenience for situations where a call address
+// is required, such as when dealing with *Reference and Referenceable values.
+func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) {
+	if len(m) == 0 {
+		panic("cannot produce ModuleCall for root module")
+	}
+
+	inst, lastStep := m[:len(m)-1], m[len(m)-1]
+	return inst, ModuleCall{
+		Name: lastStep.Name,
+	}
+}
+
+// CallInstance returns the module call instance address that corresponds to
+// the given module instance, along with the address of the module instance
+// that contains it.
+//
+// There is no call for the root module, so this method will panic if called
+// on the root module address.
+//
+// In practice, this just turns the last element of the receiver into a
+// ModuleCallInstance and then returns a slice of the receiver that excludes
+// that last part. This is just a convenience for situations where a call
+// address is required, such as when dealing with *Reference and Referenceable
+// values.
+func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) {
+	if len(m) == 0 {
+		panic("cannot produce ModuleCallInstance for root module")
+	}
+
+	inst, lastStep := m[:len(m)-1], m[len(m)-1]
+	return inst, ModuleCallInstance{
+		Call: ModuleCall{
+			Name: lastStep.Name,
+		},
+		Key: lastStep.InstanceKey,
+	}
+}
+
+// TargetContains implements Targetable by returning true if the given other
+// address either matches the receiver, is a sub-module-instance of the
+// receiver, or is a targetable absolute address within a module that
+// is contained within the receiver.
+func (m ModuleInstance) TargetContains(other Targetable) bool {
+	switch to := other.(type) {
+
+	case ModuleInstance:
+		if len(to) < len(m) {
+			// Can't be contained if the path is shorter
+			return false
+		}
+		// Other is contained if its steps match for the length of our own path.
+		for i, ourStep := range m {
+			otherStep := to[i]
+			if ourStep != otherStep {
+				return false
+			}
+		}
+		// If we fall out here then the prefix matched, so it's contained.
+		return true

+	case AbsResource:
+		return m.TargetContains(to.Module)
+
+	case AbsResourceInstance:
+		return m.TargetContains(to.Module)
+
+	default:
+		return false
+	}
+}
+
+func (m ModuleInstance) targetableSigil() {
+	// ModuleInstance is targetable
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/output_value.go b/vendor/github.com/hashicorp/terraform/addrs/output_value.go
new file mode 100644
index 0000000000..bcd923acb7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/output_value.go
@@ -0,0 +1,75 @@
+package addrs
+
+import (
+	"fmt"
+)
+
+// OutputValue is the address of an output value, in the context of the module
+// that is defining it.
+//
+// This is related to but separate from ModuleCallOutput, which represents
+// a module output from the perspective of its parent module. Since output
+// values cannot be referenced from the module where they are defined,
+// OutputValue is not Referenceable, while ModuleCallOutput is.
+type OutputValue struct {
+	Name string
+}
+
+func (v OutputValue) String() string {
+	return "output." + v.Name
+}
+
+// Absolute converts the receiver into an absolute address within the given
+// module instance.
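+//
+// A small sketch (the output name "endpoint" is hypothetical):
+//
+//     OutputValue{Name: "endpoint"}.Absolute(RootModuleInstance).String()
+//     // yields "output.endpoint"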
+func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue {
+	return AbsOutputValue{
+		Module:      m,
+		OutputValue: v,
+	}
+}
+
+// AbsOutputValue is the absolute address of an output value within a module instance.
+//
+// This represents an output globally within the namespace of a particular
+// configuration. It is related to but separate from ModuleCallOutput, which
+// represents a module output from the perspective of its parent module.
+type AbsOutputValue struct {
+	Module      ModuleInstance
+	OutputValue OutputValue
+}
+
+// OutputValue returns the absolute address of an output value of the given
+// name within the receiving module instance.
+func (m ModuleInstance) OutputValue(name string) AbsOutputValue {
+	return AbsOutputValue{
+		Module: m,
+		OutputValue: OutputValue{
+			Name: name,
+		},
+	}
+}
+
+func (v AbsOutputValue) String() string {
+	if v.Module.IsRoot() {
+		return v.OutputValue.String()
+	}
+	return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String())
+}
+
+// ModuleCallOutput converts an AbsOutputValue into a ModuleCallOutput,
+// returning also the module instance that the ModuleCallOutput is relative
+// to.
+//
+// The root module does not have a call, and so this method cannot be used
+// with outputs in the root module, and will panic in that case.
+func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallOutput) {
+	if v.Module.IsRoot() {
+		panic("ModuleCallOutput used with root module output")
+	}
+
+	caller, call := v.Module.CallInstance()
+	return caller, ModuleCallOutput{
+		Call: call,
+		Name: v.OutputValue.Name,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
new file mode 100644
index 0000000000..84fe8a0d05
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
@@ -0,0 +1,338 @@
+package addrs
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// Reference describes a reference to an address with source location
+// information.
+type Reference struct {
+	Subject     Referenceable
+	SourceRange tfdiags.SourceRange
+	Remaining   hcl.Traversal
+}
+
+// ParseRef attempts to extract a referenceable address from the prefix of the
+// given traversal, which must be an absolute traversal or this function
+// will panic.
+//
+// If no error diagnostics are returned, the returned reference includes the
+// address that was extracted, the source range it was extracted from, and any
+// remaining relative traversal that was not consumed as part of the
+// reference.
+//
+// If error diagnostics are returned then the Reference value is invalid and
+// must not be used.
+func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
+	ref, diags := parseRef(traversal)
+
+	// Normalize a little to make life easier for callers.
+	if ref != nil {
+		if len(ref.Remaining) == 0 {
+			ref.Remaining = nil
+		}
+	}
+
+	return ref, diags
+}
+
+// ParseRefStr is a helper wrapper around ParseRef that takes a string
+// and parses it with the HCL native syntax traversal parser before
+// interpreting it.
+//
+// This should be used only in specialized situations since it will cause the
+// created references to not have any meaningful source location information.
+// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseRef. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned reference may be nil or incomplete. +func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + ref, targetDiags := ParseRef(traversal) + diags = diags.Append(targetDiags) + return ref, diags +} + +func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + root := traversal.RootName() + rootRange := traversal[0].SourceRange() + + switch root { + + case "count": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: CountAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "data": + if len(traversal) < 3 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `The "data" object must be followed by two attribute names: the data source type and the resource name.`, + Subject: traversal.SourceRange().Ptr(), + }) + return nil, diags + } + remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser + return parseResourceRef(DataResourceMode, rootRange, remain) + + case "local": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: LocalValue{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "module": + callName, callRange, remain, diags := parseSingleAttrRef(traversal) + if diags.HasErrors() { + return nil, diags + } + + // A traversal starting with "module" can either be a reference to + // an entire module instance or to a single output from a module + // instance, depending on what we find after this introducer. + + callInstance := ModuleCallInstance{ + Call: ModuleCall{ + Name: callName, + }, + Key: NoKey, + } + + if len(remain) == 0 { + // Reference to an entire module instance. Might alternatively + // be a reference to a collection of instances of a particular + // module, but the caller will need to deal with that ambiguity + // since we don't have enough context here. + return &Reference{ + Subject: callInstance, + SourceRange: tfdiags.SourceRangeFromHCL(callRange), + Remaining: remain, + }, diags + } + + if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { + var err error + callInstance.Key, err = ParseInstanceKey(idxTrav.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid index key", + Detail: fmt.Sprintf("Invalid index for module instance: %s.", err), + Subject: &idxTrav.SrcRange, + }) + return nil, diags + } + remain = remain[1:] + + if len(remain) == 0 { + // Also a reference to an entire module instance, but we have a key + // now. 
+ return &Reference{ + Subject: callInstance, + SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)), + Remaining: remain, + }, diags + } + } + + if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok { + remain = remain[1:] + return &Reference{ + Subject: ModuleCallOutput{ + Name: attrTrav.Name, + Call: callInstance, + }, + SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)), + Remaining: remain, + }, diags + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: "Module instance objects do not support this operation.", + Subject: remain[0].SourceRange().Ptr(), + }) + return nil, diags + + case "path": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: PathAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "self": + return &Reference{ + Subject: Self, + SourceRange: tfdiags.SourceRangeFromHCL(rootRange), + Remaining: traversal[1:], + }, diags + + case "terraform": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: TerraformAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "var": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: InputVariable{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + default: + return parseResourceRef(ManagedResourceMode, rootRange, traversal) + } +} + +func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if len(traversal) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`, + Subject: hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(), + }) + return nil, diags + } + + var typeName, name string + switch tt := traversal[0].(type) { // Could be either root or attr, depending on our resource mode + case hcl.TraverseRoot: + typeName = tt.Name + case hcl.TraverseAttr: + typeName = tt.Name + default: + // If it isn't a TraverseRoot then it must be a "data" reference. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `The "data" object does not support this operation.`, + Subject: traversal[0].SourceRange().Ptr(), + }) + return nil, diags + } + + attrTrav, ok := traversal[1].(hcl.TraverseAttr) + if !ok { + var what string + switch mode { + case DataResourceMode: + what = "data source" + default: + what = "resource type" + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what), + Subject: traversal[1].SourceRange().Ptr(), + }) + return nil, diags + } + name = attrTrav.Name + rng := hcl.RangeBetween(startRange, attrTrav.SrcRange) + remain := traversal[2:] + + resourceAddr := Resource{ + Mode: mode, + Type: typeName, + Name: name, + } + resourceInstAddr := ResourceInstance{ + Resource: resourceAddr, + Key: NoKey, + } + + if len(remain) == 0 { + // This might actually be a reference to the collection of all instances + // of the resource, but we don't have enough context here to decide + // so we'll let the caller resolve that ambiguity. + return &Reference{ + Subject: resourceInstAddr, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + }, diags + } + + if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { + var err error + resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid index key", + Detail: fmt.Sprintf("Invalid index for resource instance: %s.", err), + Subject: &idxTrav.SrcRange, + }) + return nil, diags + } + remain = remain[1:] + rng = hcl.RangeBetween(rng, idxTrav.SrcRange) + } + + return &Reference{ + Subject: resourceInstAddr, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags +} + +func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + root := traversal.RootName() + rootRange := traversal[0].SourceRange() + + if len(traversal) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf("The %q object cannot be accessed directly. Instead, access one of its attributes.", root), + Subject: &rootRange, + }) + return "", hcl.Range{}, nil, diags + } + if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok { + return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf("The %q object does not support this operation.", root), + Subject: traversal[1].SourceRange().Ptr(), + }) + return "", hcl.Range{}, nil, diags +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_target.go b/vendor/github.com/hashicorp/terraform/addrs/parse_target.go new file mode 100644 index 0000000000..057443af65 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/parse_target.go @@ -0,0 +1,318 @@ +package addrs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/tfdiags" +) + +// Target describes a targeted address with source location information. 
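+//
+// As a usage sketch (the address string is hypothetical), a Target usually
+// comes from parsing user input such as a -target=... option:
+//
+//     target, diags := ParseTargetStr("module.foo.aws_instance.bar[0]")
+//     if !diags.HasErrors() {
+//         // target.Subject is an AbsResourceInstance in this case
+//     }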
+type Target struct { + Subject Targetable + SourceRange tfdiags.SourceRange +} + +// ParseTarget attempts to interpret the given traversal as a targetable +// address. The given traversal must be absolute, or this function will +// panic. +// +// If no error diagnostics are returned, the returned target includes the +// address that was extracted and the source range it was extracted from. +// +// If error diagnostics are returned then the Target value is invalid and +// must not be used. +func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { + path, remain, diags := parseModuleInstancePrefix(traversal) + if diags.HasErrors() { + return nil, diags + } + + rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange()) + + if len(remain) == 0 { + return &Target{ + Subject: path, + SourceRange: rng, + }, diags + } + + mode := ManagedResourceMode + if remain.RootName() == "data" { + mode = DataResourceMode + remain = remain[1:] + } + + if len(remain) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Resource specification must include a resource type and name.", + Subject: remain.SourceRange().Ptr(), + }) + return nil, diags + } + + var typeName, name string + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + typeName = tt.Name + case hcl.TraverseAttr: + typeName = tt.Name + default: + switch mode { + case ManagedResourceMode: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource type name is required.", + Subject: remain[0].SourceRange().Ptr(), + }) + case DataResourceMode: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A data source name is required.", + Subject: remain[0].SourceRange().Ptr(), + }) + default: + panic("unknown mode") + } + return nil, diags + } + + switch tt := remain[1].(type) { + case hcl.TraverseAttr: + name = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource name is required.", + Subject: remain[1].SourceRange().Ptr(), + }) + return nil, diags + } + + var subject Targetable + remain = remain[2:] + switch len(remain) { + case 0: + subject = path.Resource(mode, typeName, name) + case 1: + if tt, ok := remain[0].(hcl.TraverseIndex); ok { + key, err := ParseInstanceKey(tt.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: fmt.Sprintf("Invalid resource instance key: %s.", err), + Subject: remain[0].SourceRange().Ptr(), + }) + return nil, diags + } + + subject = path.ResourceInstance(mode, typeName, name, key) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Resource instance key must be given in square brackets.", + Subject: remain[0].SourceRange().Ptr(), + }) + return nil, diags + } + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Unexpected extra operators after address.", + Subject: remain[1].SourceRange().Ptr(), + }) + return nil, diags + } + + return &Target{ + Subject: subject, + SourceRange: rng, + }, diags +} + +// ParseTargetStr is a helper wrapper around ParseTarget that takes a string +// and parses it with the HCL native syntax traversal parser before +// interpreting it. 
+// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a target string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseTarget. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned target may be nil or incomplete. +func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + target, targetDiags := ParseTarget(traversal) + diags = diags.Append(targetDiags) + return target, diags +} + +// ParseAbsResource attempts to interpret the given traversal as an absolute +// resource address, using the same syntax as expected by ParseTarget. +// +// If no error diagnostics are returned, the returned target includes the +// address that was extracted and the source range it was extracted from. +// +// If error diagnostics are returned then the AbsResource value is invalid and +// must not be used. +func ParseAbsResource(traversal hcl.Traversal) (AbsResource, tfdiags.Diagnostics) { + addr, diags := ParseTarget(traversal) + if diags.HasErrors() { + return AbsResource{}, diags + } + + switch tt := addr.Subject.(type) { + + case AbsResource: + return tt, diags + + case AbsResourceInstance: // Catch likely user error with specialized message + // Assume that the last element of the traversal must be the index, + // since that's required for a valid resource instance address. + indexStep := traversal[len(traversal)-1] + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required. This instance key identifies a specific resource instance, which is not expected here.", + Subject: indexStep.SourceRange().Ptr(), + }) + return AbsResource{}, diags + + case ModuleInstance: // Catch likely user error with specialized message + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required here. The module path must be followed by a resource specification.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResource{}, diags + + default: // Generic message for other address types + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required here.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResource{}, diags + + } +} + +// ParseAbsResourceStr is a helper wrapper around ParseAbsResource that takes a +// string and parses it with the HCL native syntax traversal parser before +// interpreting it. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned address may be incomplete. 
+//
+// Since this function has no context about the source of the given string,
+// any returned diagnostics will not have meaningful source location
+// information.
+func ParseAbsResourceStr(str string) (AbsResource, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return AbsResource{}, diags
+	}
+
+	addr, addrDiags := ParseAbsResource(traversal)
+	diags = diags.Append(addrDiags)
+	return addr, diags
+}
+
+// ParseAbsResourceInstance attempts to interpret the given traversal as an
+// absolute resource instance address, using the same syntax as expected by
+// ParseTarget.
+//
+// If no error diagnostics are returned, the returned target includes the
+// address that was extracted and the source range it was extracted from.
+//
+// If error diagnostics are returned then the AbsResourceInstance value is
+// invalid and must not be used.
+func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) {
+	addr, diags := ParseTarget(traversal)
+	if diags.HasErrors() {
+		return AbsResourceInstance{}, diags
+	}
+
+	switch tt := addr.Subject.(type) {
+
+	case AbsResource:
+		return tt.Instance(NoKey), diags
+
+	case AbsResourceInstance:
+		return tt, diags
+
+	case ModuleInstance: // Catch likely user error with specialized message
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid address",
+			Detail:   "A resource instance address is required here. The module path must be followed by a resource instance specification.",
+			Subject:  traversal.SourceRange().Ptr(),
+		})
+		return AbsResourceInstance{}, diags
+
+	default: // Generic message for other address types
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid address",
+			Detail:   "A resource address is required here.",
+			Subject:  traversal.SourceRange().Ptr(),
+		})
+		return AbsResourceInstance{}, diags
+
+	}
+}
+
+// ParseAbsResourceInstanceStr is a helper wrapper around
+// ParseAbsResourceInstance that takes a string and parses it with the HCL
+// native syntax traversal parser before interpreting it.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// the returned address may be incomplete.
+//
+// Since this function has no context about the source of the given string,
+// any returned diagnostics will not have meaningful source location
+// information.
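+//
+// A usage sketch (the address string is hypothetical):
+//
+//     addr, diags := ParseAbsResourceInstanceStr("aws_instance.web[0]")
+//     // on success, addr is the instance address in the root module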
+func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsResourceInstance{}, diags + } + + addr, addrDiags := ParseAbsResourceInstance(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/path_attr.go b/vendor/github.com/hashicorp/terraform/addrs/path_attr.go new file mode 100644 index 0000000000..cfc13f4bcd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/path_attr.go @@ -0,0 +1,12 @@ +package addrs + +// PathAttr is the address of an attribute of the "path" object in +// the interpolation scope, like "path.module". +type PathAttr struct { + referenceable + Name string +} + +func (pa PathAttr) String() string { + return "path." + pa.Name +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/provider_config.go b/vendor/github.com/hashicorp/terraform/addrs/provider_config.go new file mode 100644 index 0000000000..340dd19e79 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/provider_config.go @@ -0,0 +1,297 @@ +package addrs + +import ( + "fmt" + + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +// ProviderConfig is the address of a provider configuration. +type ProviderConfig struct { + Type string + + // If not empty, Alias identifies which non-default (aliased) provider + // configuration this address refers to. + Alias string +} + +// NewDefaultProviderConfig returns the address of the default (un-aliased) +// configuration for the provider with the given type name. +func NewDefaultProviderConfig(typeName string) ProviderConfig { + return ProviderConfig{ + Type: typeName, + } +} + +// ParseProviderConfigCompact parses the given absolute traversal as a relative +// provider address in compact form. The following are examples of traversals +// that can be successfully parsed as compact relative provider configuration +// addresses: +// +// aws +// aws.foo +// +// This function will panic if given a relative traversal. +// +// If the returned diagnostics contains errors then the result value is invalid +// and must not be used. +func ParseProviderConfigCompact(traversal hcl.Traversal) (ProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := ProviderConfig{ + Type: traversal.RootName(), + } + + if len(traversal) < 2 { + // Just a type name, then. 
+		return ret, diags
+	}
+
+	aliasStep := traversal[1]
+	switch ts := aliasStep.(type) {
+	case hcl.TraverseAttr:
+		ret.Alias = ts.Name
+		return ret, diags
+	default:
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration address",
+			Detail:   "The provider type name must either stand alone or be followed by an alias name separated with a dot.",
+			Subject:  aliasStep.SourceRange().Ptr(),
+		})
+	}
+
+	if len(traversal) > 2 {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration address",
+			Detail:   "Extraneous operators after provider configuration address.",
+			Subject:  traversal[2:].SourceRange().Ptr(),
+		})
+	}
+
+	return ret, diags
+}
+
+// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact
+// that takes a string and parses it with the HCL native syntax traversal parser
+// before interpreting it.
+//
+// This should be used only in specialized situations since it will cause the
+// created references to not have any meaningful source location information.
+// If a reference string is coming from a source that should be identified in
+// error messages then the caller should instead parse it directly using a
+// suitable function from the HCL API and pass the traversal itself to
+// ParseProviderConfigCompact.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// then the returned address is invalid.
+func ParseProviderConfigCompactStr(str string) (ProviderConfig, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return ProviderConfig{}, diags
+	}
+
+	addr, addrDiags := ParseProviderConfigCompact(traversal)
+	diags = diags.Append(addrDiags)
+	return addr, diags
+}
+
+// Absolute returns an AbsProviderConfig from the receiver and the given module
+// instance address.
+func (pc ProviderConfig) Absolute(module ModuleInstance) AbsProviderConfig {
+	return AbsProviderConfig{
+		Module:         module,
+		ProviderConfig: pc,
+	}
+}
+
+func (pc ProviderConfig) String() string {
+	if pc.Type == "" {
+		// Should never happen; always indicates a bug
+		return "provider.<invalid>"
+	}
+
+	if pc.Alias != "" {
+		return fmt.Sprintf("provider.%s.%s", pc.Type, pc.Alias)
+	}
+
+	return "provider." + pc.Type
+}
+
+// StringCompact is an alternative to String that returns the form that can
+// be parsed by ParseProviderConfigCompact, without the "provider." prefix.
+func (pc ProviderConfig) StringCompact() string {
+	if pc.Alias != "" {
+		return fmt.Sprintf("%s.%s", pc.Type, pc.Alias)
+	}
+	return pc.Type
+}
+
+// AbsProviderConfig is the absolute address of a provider configuration
+// within a particular module instance.
+type AbsProviderConfig struct {
+	Module         ModuleInstance
+	ProviderConfig ProviderConfig
+}
+
+// ParseAbsProviderConfig parses the given traversal as an absolute provider
+// address. The following are examples of traversals that can be successfully
+// parsed as absolute provider configuration addresses:
+//
+//     provider.aws
+//     provider.aws.foo
+//     module.bar.provider.aws
+//     module.bar.module.baz.provider.aws.foo
+//     module.foo[1].provider.aws.foo
+//
+// This type of address is used, for example, to record the relationships
+// between resources and provider configurations in the state structure.
+// This type of address is not generally used in the UI, except in error
+// messages that refer to provider configurations.
+func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) {
+	modInst, remain, diags := parseModuleInstancePrefix(traversal)
+	ret := AbsProviderConfig{
+		Module: modInst,
+	}
+	if len(remain) < 2 || remain.RootName() != "provider" {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration address",
+			Detail:   "Provider address must begin with \"provider.\", followed by a provider type name.",
+			Subject:  remain.SourceRange().Ptr(),
+		})
+		return ret, diags
+	}
+	if len(remain) > 3 {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration address",
+			Detail:   "Extraneous operators after provider configuration alias.",
+			Subject:  hcl.Traversal(remain[3:]).SourceRange().Ptr(),
+		})
+		return ret, diags
+	}
+
+	if tt, ok := remain[1].(hcl.TraverseAttr); ok {
+		ret.ProviderConfig.Type = tt.Name
+	} else {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration address",
+			Detail:   "The prefix \"provider.\" must be followed by a provider type name.",
+			Subject:  remain[1].SourceRange().Ptr(),
+		})
+		return ret, diags
+	}
+
+	if len(remain) == 3 {
+		if tt, ok := remain[2].(hcl.TraverseAttr); ok {
+			ret.ProviderConfig.Alias = tt.Name
+		} else {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid provider configuration address",
+				Detail:   "Provider type name must be followed by a configuration alias name.",
+				Subject:  remain[2].SourceRange().Ptr(),
+			})
+			return ret, diags
+		}
+	}
+
+	return ret, diags
+}
+
+// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig
+// that takes a string and parses it with the HCL native syntax traversal parser
+// before interpreting it.
+//
+// This should be used only in specialized situations since it will cause the
+// created references to not have any meaningful source location information.
+// If a reference string is coming from a source that should be identified in
+// error messages then the caller should instead parse it directly using a
+// suitable function from the HCL API and pass the traversal itself to
+// ParseAbsProviderConfig.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// the returned address is invalid.
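+//
+// A usage sketch (the address string is hypothetical):
+//
+//     pc, diags := ParseAbsProviderConfigStr("module.bar.provider.aws.foo")
+//     // on success, pc.ProviderConfig.Type == "aws" and Alias == "foo"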
+func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return AbsProviderConfig{}, diags
+	}
+
+	addr, addrDiags := ParseAbsProviderConfig(traversal)
+	diags = diags.Append(addrDiags)
+	return addr, diags
+}
+
+// ProviderConfigDefault returns the address of the default provider config
+// of the given type inside the receiving module instance.
+func (m ModuleInstance) ProviderConfigDefault(name string) AbsProviderConfig {
+	return AbsProviderConfig{
+		Module: m,
+		ProviderConfig: ProviderConfig{
+			Type: name,
+		},
+	}
+}
+
+// ProviderConfigAliased returns the address of an aliased provider config
+// with the given type and alias inside the receiving module instance.
+func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderConfig {
+	return AbsProviderConfig{
+		Module: m,
+		ProviderConfig: ProviderConfig{
+			Type:  name,
+			Alias: alias,
+		},
+	}
+}
+
+// Inherited returns an address that the receiving configuration address might
+// inherit from in a parent module. The second bool return value indicates if
+// such inheritance is possible, and thus whether the returned address is valid.
+//
+// Inheritance is possible only for default (un-aliased) providers in modules
+// other than the root module. Even if a valid address is returned, inheritance
+// may not be performed for other reasons, such as if the calling module
+// provided explicit provider configurations within the call for this module.
+// The ProviderTransformer graph transform in the main terraform module has
+// the authoritative logic for provider inheritance, and this method is here
+// mainly just for its benefit.
+func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) {
+	// Can't inherit if we're already in the root.
+	if len(pc.Module) == 0 {
+		return AbsProviderConfig{}, false
+	}
+
+	// Can't inherit if we have an alias.
+	if pc.ProviderConfig.Alias != "" {
+		return AbsProviderConfig{}, false
+	}
+
+	// Otherwise, we might inherit from a configuration with the same
+	// provider name in the parent module instance.
+	parentMod := pc.Module.Parent()
+	return pc.ProviderConfig.Absolute(parentMod), true
+}
+
+func (pc AbsProviderConfig) String() string {
+	if len(pc.Module) == 0 {
+		return pc.ProviderConfig.String()
+	}
+	return fmt.Sprintf("%s.%s", pc.Module.String(), pc.ProviderConfig.String())
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/referenceable.go b/vendor/github.com/hashicorp/terraform/addrs/referenceable.go
new file mode 100644
index 0000000000..211083a5f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/referenceable.go
@@ -0,0 +1,20 @@
+package addrs
+
+// Referenceable is an interface implemented by all address types that can
+// appear as references in configuration language expressions.
+type Referenceable interface {
+	// All implementations of this interface must be covered by the type switch
+	// in lang.Scope.buildEvalContext.
+	referenceableSigil()
+
+	// String produces a string representation of the address that could be
+	// parsed as an HCL traversal and passed to ParseRef to produce an identical
+	// result.
+ String() string
+}
+
+type referenceable struct {
+}
+
+func (r referenceable) referenceableSigil() {
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource.go b/vendor/github.com/hashicorp/terraform/addrs/resource.go
new file mode 100644
index 0000000000..28667708e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/resource.go
@@ -0,0 +1,270 @@
+package addrs
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Resource is an address for a resource block within configuration, which
+// contains potentially-multiple resource instances if that configuration
+// block uses "count" or "for_each".
+type Resource struct {
+ referenceable
+ Mode ResourceMode
+ Type string
+ Name string
+}
+
+func (r Resource) String() string {
+ switch r.Mode {
+ case ManagedResourceMode:
+ return fmt.Sprintf("%s.%s", r.Type, r.Name)
+ case DataResourceMode:
+ return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
+ default:
+ // Should never happen, but we'll return a string here rather than
+ // crashing just in case it does.
+ return fmt.Sprintf(".%s.%s", r.Type, r.Name)
+ }
+}
+
+func (r Resource) Equal(o Resource) bool {
+ return r.String() == o.String()
+}
+
+// Instance produces the address for a specific instance of the receiver
+// that is identified by the given key.
+func (r Resource) Instance(key InstanceKey) ResourceInstance {
+ return ResourceInstance{
+ Resource: r,
+ Key: key,
+ }
+}
+
+// Absolute returns an AbsResource from the receiver and the given module
+// instance address.
+func (r Resource) Absolute(module ModuleInstance) AbsResource {
+ return AbsResource{
+ Module: module,
+ Resource: r,
+ }
+}
+
+// DefaultProviderConfig returns the address of the provider configuration
+// that should be used for the resource identified by the receiver if it
+// does not have a provider configuration address explicitly set in
+// configuration.
+//
+// This method is not able to verify that such a configuration exists, nor
+// represent the behavior of automatically inheriting certain provider
+// configurations from parent modules. It just does a static analysis of the
+// receiving address and returns an address to start from, relative to the
+// same module that contains the resource.
+func (r Resource) DefaultProviderConfig() ProviderConfig {
+ typeName := r.Type
+ if under := strings.Index(typeName, "_"); under != -1 {
+ typeName = typeName[:under]
+ }
+ return ProviderConfig{
+ Type: typeName,
+ }
+}
+
+// ResourceInstance is an address for a specific instance of a resource.
+// When a resource is defined in configuration with "count" or "for_each" it
+// produces zero or more instances, which can be addressed using this type.
+type ResourceInstance struct {
+ referenceable
+ Resource Resource
+ Key InstanceKey
+}
+
+func (r ResourceInstance) ContainingResource() Resource {
+ return r.Resource
+}
+
+func (r ResourceInstance) String() string {
+ if r.Key == NoKey {
+ return r.Resource.String()
+ }
+ return r.Resource.String() + r.Key.String()
+}
+
+func (r ResourceInstance) Equal(o ResourceInstance) bool {
+ return r.String() == o.String()
+}
+
+// Absolute returns an AbsResourceInstance from the receiver and the given module
+// instance address.
+func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance {
+ return AbsResourceInstance{
+ Module: module,
+ Resource: r,
+ }
+}
+
+// AbsResource is an absolute address for a resource under a given module path.
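+//
+// For illustration, values of this type render like the following
+// (hypothetical addresses):
+//
+//     aws_instance.web                 (resource in the root module)
+//     module.foo.aws_instance.web      (resource in a child module instance)
+//     module.foo[1].data.aws_ami.base  (data resource in a keyed module instance)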
+type AbsResource struct {
+ targetable
+ Module ModuleInstance
+ Resource Resource
+}
+
+// Resource returns the address of a particular resource within the receiver.
+func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource {
+ return AbsResource{
+ Module: m,
+ Resource: Resource{
+ Mode: mode,
+ Type: typeName,
+ Name: name,
+ },
+ }
+}
+
+// Instance produces the address for a specific instance of the receiver
+// that is identified by the given key.
+func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance {
+ return AbsResourceInstance{
+ Module: r.Module,
+ Resource: r.Resource.Instance(key),
+ }
+}
+
+// TargetContains implements Targetable by returning true if the given other
+// address is either equal to the receiver or is an instance of the
+// receiver.
+func (r AbsResource) TargetContains(other Targetable) bool {
+ switch to := other.(type) {
+
+ case AbsResource:
+ // We'll use our stringification as a cheat-ish way to test for equality.
+ return to.String() == r.String()
+
+ case AbsResourceInstance:
+ return r.TargetContains(to.ContainingResource())
+
+ default:
+ return false
+
+ }
+}
+
+func (r AbsResource) String() string {
+ if len(r.Module) == 0 {
+ return r.Resource.String()
+ }
+ return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
+}
+
+func (r AbsResource) Equal(o AbsResource) bool {
+ return r.String() == o.String()
+}
+
+// AbsResourceInstance is an absolute address for a resource instance under a
+// given module path.
+type AbsResourceInstance struct {
+ targetable
+ Module ModuleInstance
+ Resource ResourceInstance
+}
+
+// ResourceInstance returns the address of a particular resource instance within the receiver.
+func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance {
+ return AbsResourceInstance{
+ Module: m,
+ Resource: ResourceInstance{
+ Resource: Resource{
+ Mode: mode,
+ Type: typeName,
+ Name: name,
+ },
+ Key: key,
+ },
+ }
+}
+
+// ContainingResource returns the address of the resource that contains the
+// receiving resource instance. In other words, it discards the key portion
+// of the address to produce an AbsResource value.
+func (r AbsResourceInstance) ContainingResource() AbsResource {
+ return AbsResource{
+ Module: r.Module,
+ Resource: r.Resource.ContainingResource(),
+ }
+}
+
+// TargetContains implements Targetable by returning true if the given other
+// address is equal to the receiver.
+func (r AbsResourceInstance) TargetContains(other Targetable) bool {
+ switch to := other.(type) {
+
+ case AbsResourceInstance:
+ // We'll use our stringification as a cheat-ish way to test for equality.
+ return to.String() == r.String()
+
+ default:
+ return false
+
+ }
+}
+
+func (r AbsResourceInstance) String() string {
+ if len(r.Module) == 0 {
+ return r.Resource.String()
+ }
+ return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
+}
+
+func (r AbsResourceInstance) Equal(o AbsResourceInstance) bool {
+ return r.String() == o.String()
+}
+
+// Less returns true if the receiver should sort before the given other value
+// in a sorted list of addresses.
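+//
+// A minimal sketch of how a caller might apply this ordering (assumes a
+// slice named insts; illustrative only):
+//
+//     sort.Slice(insts, func(i, j int) bool {
+//         return insts[i].Less(insts[j])
+//     })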
+func (r AbsResourceInstance) Less(o AbsResourceInstance) bool {
+ switch {
+
+ case len(r.Module) != len(o.Module):
+ return len(r.Module) < len(o.Module)
+
+ case r.Module.String() != o.Module.String():
+ return r.Module.Less(o.Module)
+
+ case r.Resource.Resource.Mode != o.Resource.Resource.Mode:
+ return r.Resource.Resource.Mode == DataResourceMode
+
+ case r.Resource.Resource.Type != o.Resource.Resource.Type:
+ return r.Resource.Resource.Type < o.Resource.Resource.Type
+
+ case r.Resource.Resource.Name != o.Resource.Resource.Name:
+ return r.Resource.Resource.Name < o.Resource.Resource.Name
+
+ case r.Resource.Key != o.Resource.Key:
+ return InstanceKeyLess(r.Resource.Key, o.Resource.Key)
+
+ default:
+ return false
+
+ }
+}
+
+// ResourceMode defines which lifecycle applies to a given resource. Each
+// resource lifecycle has a slightly different address format.
+type ResourceMode rune
+
+//go:generate stringer -type ResourceMode
+
+const (
+ // InvalidResourceMode is the zero value of ResourceMode and is not
+ // a valid resource mode.
+ InvalidResourceMode ResourceMode = 0
+
+ // ManagedResourceMode indicates a managed resource, as defined by
+ // "resource" blocks in configuration.
+ ManagedResourceMode ResourceMode = 'M'
+
+ // DataResourceMode indicates a data resource, as defined by
+ // "data" blocks in configuration.
+ DataResourceMode ResourceMode = 'D'
+)
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go b/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go
new file mode 100644
index 0000000000..9bdbdc421a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go
@@ -0,0 +1,105 @@
+package addrs
+
+import "fmt"
+
+// ResourceInstancePhase is a special kind of reference used only internally
+// during graph building to represent resource instances that are in a
+// non-primary state.
+//
+// Graph nodes can declare themselves referenceable via an instance phase
+// or can declare that they reference an instance phase in order to accommodate
+// secondary graph nodes dealing with, for example, destroy actions.
+//
+// This special reference type cannot be accessed directly by end-users, and
+// should never be shown in the UI.
+type ResourceInstancePhase struct {
+ referenceable
+ ResourceInstance ResourceInstance
+ Phase ResourceInstancePhaseType
+}
+
+var _ Referenceable = ResourceInstancePhase{}
+
+// Phase returns a special "phase address" for the receiving instance. See the
+// documentation of ResourceInstancePhase for the limited situations where this
+// is intended to be used.
+func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase {
+ return ResourceInstancePhase{
+ ResourceInstance: r,
+ Phase: rpt,
+ }
+}
+
+// ContainingResource returns an address for the same phase of the resource
+// that this instance belongs to.
+func (rp ResourceInstancePhase) ContainingResource() ResourcePhase {
+ return rp.ResourceInstance.Resource.Phase(rp.Phase)
+}
+
+func (rp ResourceInstancePhase) String() string {
+ // We use a different separator here than usual to ensure that we'll
+ // never conflict with any non-phased resource instance string. This
+ // is intentionally something that would fail parsing with ParseRef,
+ // because this special address type should never be exposed in the UI.
+ return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase)
+}
+
+// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase.
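+//
+// For illustration, a phase address for a hypothetical instance renders as
+// something like the following (and, as noted above, is never shown in the UI):
+//
+//     aws_instance.foo[0]#destroy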
+type ResourceInstancePhaseType string
+
+const (
+ // ResourceInstancePhaseDestroy represents the "destroy" phase of a
+ // resource instance.
+ ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy"
+
+ // ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy
+ // but is used for resources that have "create_before_destroy" set, thus
+ // requiring a different dependency ordering.
+ ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd"
+)
+
+func (rpt ResourceInstancePhaseType) String() string {
+ return string(rpt)
+}
+
+// ResourcePhase is a special kind of reference used only internally
+// during graph building to represent resources that are in a
+// non-primary state.
+//
+// Graph nodes can declare themselves referenceable via a resource phase
+// or can declare that they reference a resource phase in order to accommodate
+// secondary graph nodes dealing with, for example, destroy actions.
+//
+// Since resources (as opposed to instances) aren't actually phased, this
+// address type is used only as an approximation during initial construction
+// of the resource-oriented plan graph, under the assumption that resource
+// instances with ResourceInstancePhase addresses will be created in dynamic
+// subgraphs during the graph walk.
+//
+// This special reference type cannot be accessed directly by end-users, and
+// should never be shown in the UI.
+type ResourcePhase struct {
+ referenceable
+ Resource Resource
+ Phase ResourceInstancePhaseType
+}
+
+var _ Referenceable = ResourcePhase{}
+
+// Phase returns a special "phase address" for the receiving resource. See the
+// documentation of ResourceInstancePhase for the limited situations where this
+// is intended to be used.
+func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase {
+ return ResourcePhase{
+ Resource: r,
+ Phase: rpt,
+ }
+}
+
+func (rp ResourcePhase) String() string {
+ // We use a different separator here than usual to ensure that we'll
+ // never conflict with any non-phased resource instance string. This
+ // is intentionally something that would fail parsing with ParseRef,
+ // because this special address type should never be exposed in the UI.
+ return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase)
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go
new file mode 100644
index 0000000000..56e99adfa8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go
@@ -0,0 +1,24 @@
+// Code generated by "stringer -type ResourceMode"; DO NOT EDIT.
+
+package addrs
+
+import "strconv"
+
+const (
+ _ResourceMode_name_0 = "InvalidResourceMode"
+ _ResourceMode_name_1 = "DataResourceMode"
+ _ResourceMode_name_2 = "ManagedResourceMode"
+)
+
+func (i ResourceMode) String() string {
+ switch {
+ case i == 0:
+ return _ResourceMode_name_0
+ case i == 68:
+ return _ResourceMode_name_1
+ case i == 77:
+ return _ResourceMode_name_2
+ default:
+ return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/self.go b/vendor/github.com/hashicorp/terraform/addrs/self.go
new file mode 100644
index 0000000000..7f24eaf085
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/self.go
@@ -0,0 +1,14 @@
+package addrs
+
+// Self is the address of the special object "self" that behaves as an alias
+// for a containing object currently in scope.
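+//
+// For example, a configuration expression like the following refers to an
+// attribute of the containing resource via this alias (illustrative only):
+//
+//     connection {
+//         host = self.public_ip
+//     }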
+const Self selfT = 0 + +type selfT int + +func (s selfT) referenceableSigil() { +} + +func (s selfT) String() string { + return "self" +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/targetable.go b/vendor/github.com/hashicorp/terraform/addrs/targetable.go new file mode 100644 index 0000000000..16819a5afb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/targetable.go @@ -0,0 +1,26 @@ +package addrs + +// Targetable is an interface implemented by all address types that can be +// used as "targets" for selecting sub-graphs of a graph. +type Targetable interface { + targetableSigil() + + // TargetContains returns true if the receiver is considered to contain + // the given other address. Containment, for the purpose of targeting, + // means that if a container address is targeted then all of the + // addresses within it are also implicitly targeted. + // + // A targetable address always contains at least itself. + TargetContains(other Targetable) bool + + // String produces a string representation of the address that could be + // parsed as a HCL traversal and passed to ParseTarget to produce an + // identical result. + String() string +} + +type targetable struct { +} + +func (r targetable) targetableSigil() { +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go b/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go new file mode 100644 index 0000000000..a880182ae2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go @@ -0,0 +1,12 @@ +package addrs + +// TerraformAttr is the address of an attribute of the "terraform" object in +// the interpolation scope, like "terraform.workspace". +type TerraformAttr struct { + referenceable + Name string +} + +func (ta TerraformAttr) String() string { + return "terraform." + ta.Name +} diff --git a/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go new file mode 100644 index 0000000000..3dd9238dd8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go @@ -0,0 +1,295 @@ +package format + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcled" + "github.com/hashicorp/hcl2/hclparse" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/colorstring" + wordwrap "github.com/mitchellh/go-wordwrap" + "github.com/zclconf/go-cty/cty" +) + +// Diagnostic formats a single diagnostic message. +// +// The width argument specifies at what column the diagnostic messages will +// be wrapped. If set to zero, messages will not be wrapped by this function +// at all. Although the long-form text parts of the message are wrapped, +// not all aspects of the message are guaranteed to fit within the specified +// terminal width. +func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string { + if diag == nil { + // No good reason to pass a nil diagnostic in here... + return "" + } + + var buf bytes.Buffer + + switch diag.Severity() { + case tfdiags.Error: + buf.WriteString(color.Color("\n[bold][red]Error: [reset]")) + case tfdiags.Warning: + buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]")) + default: + // Clear out any coloring that might be applied by Terraform's UI helper, + // so our result is not context-sensitive. 
+ buf.WriteString(color.Color("\n[reset]")) + } + + desc := diag.Description() + sourceRefs := diag.Source() + + // We don't wrap the summary, since we expect it to be terse, and since + // this is where we put the text of a native Go error it may not always + // be pure text that lends itself well to word-wrapping. + fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary) + + if sourceRefs.Subject != nil { + // We'll borrow HCL's range implementation here, because it has some + // handy features to help us produce a nice source code snippet. + highlightRange := sourceRefs.Subject.ToHCL() + snippetRange := highlightRange + if sourceRefs.Context != nil { + snippetRange = sourceRefs.Context.ToHCL() + } + + // Make sure the snippet includes the highlight. This should be true + // for any reasonable diagnostic, but we'll make sure. + snippetRange = hcl.RangeOver(snippetRange, highlightRange) + if snippetRange.Empty() { + snippetRange.End.Byte++ + snippetRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + var src []byte + if sources != nil { + src = sources[snippetRange.Filename] + } + if src == nil { + // This should generally not happen, as long as sources are always + // loaded through the main loader. We may load things in other + // ways in weird cases, so we'll tolerate it at the expense of + // a not-so-helpful error message. + fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line) + } else { + file, offset := parseRange(src, highlightRange) + + headerRange := highlightRange + + contextStr := hcled.ContextString(file, offset-1) + if contextStr != "" { + contextStr = ", in " + contextStr + } + + fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr) + + // Config snippet rendering + sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snippetRange) { + continue + } + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + &buf, color.Color("%4d: %s[underline]%s[reset]%s\n"), + lineRange.Start.Line, + before, highlighted, after, + ) + } + + } + + if fromExpr := diag.FromExpr(); fromExpr != nil { + // We may also be able to generate information about the dynamic + // values of relevant variables at the point of evaluation, then. + // This is particularly useful for expressions that get evaluated + // multiple times with different values, such as blocks using + // "count" and "for_each", or within "for" expressions. + expr := fromExpr.Expression + ctx := fromExpr.EvalContext + vars := expr.Variables() + stmts := make([]string, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + Traversals: + for _, traversal := range vars { + for len(traversal) > 1 { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. 
+ traversal = traversal[:len(traversal)-1] + continue + } + + traversalStr := traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue Traversals // don't show duplicates when the same variable is referenced multiple times + } + switch { + case !val.IsKnown(): + // Can't say anything about this yet, then. + continue Traversals + case val.IsNull(): + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr)) + default: + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val))) + } + seen[traversalStr] = struct{}{} + } + } + + sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? + + if len(stmts) > 0 { + fmt.Fprint(&buf, color.Color(" [dark_gray]|----------------[reset]\n")) + } + for _, stmt := range stmts { + fmt.Fprintf(&buf, color.Color(" [dark_gray]|[reset] %s\n"), stmt) + } + } + + buf.WriteByte('\n') + } + + if desc.Detail != "" { + detail := desc.Detail + if width != 0 { + detail = wordwrap.WrapString(detail, uint(width)) + } + fmt.Fprintf(&buf, "%s\n", detail) + } + + return buf.String() +} + +func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { + filename := rng.Filename + offset := rng.Start.Byte + + // We need to re-parse here to get a *hcl.File we can interrogate. This + // is not awesome since we presumably already parsed the file earlier too, + // but this re-parsing is architecturally simpler than retaining all of + // the hcl.File objects and we only do this in the case of an error anyway + // so the overhead here is not a big problem. + parser := hclparse.NewParser() + var file *hcl.File + var diags hcl.Diagnostics + if strings.HasSuffix(filename, ".json") { + file, diags = parser.ParseJSON(src, filename) + } else { + file, diags = parser.ParseHCL(src, filename) + } + if diags.HasErrors() { + return file, offset + } + + return file, offset +} + +// traversalStr produces a representation of an HCL traversal that is compact, +// resembles HCL native syntax, and is suitable for display in the UI. +func traversalStr(traversal hcl.Traversal) string { + // This is a specialized subset of traversal rendering tailored to + // producing helpful contextual messages in diagnostics. It is not + // comprehensive nor intended to be used for other purposes. + + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case hcl.TraverseRoot: + buf.WriteString(tStep.Name) + case hcl.TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case hcl.TraverseIndex: + buf.WriteByte('[') + if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { + buf.WriteString(compactValueStr(tStep.Key)) + } else { + // We'll just use a placeholder for more complex values, + // since otherwise our result could grow ridiculously long. + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +// compactValueStr produces a compact, single-line summary of a given value +// that is suitable for display in the UI. +// +// For primitives it returns a full representation, while for more complex +// types it instead summarizes the type, size, etc to produce something +// that is hopefully still somewhat useful but not as verbose as a rendering +// of the entire data structure. +func compactValueStr(val cty.Value) string { + // This is a specialized subset of value rendering tailored to producing + // helpful but concise messages in diagnostics. 
It is not comprehensive + // nor intended to be used for other purposes. + + ty := val.Type() + switch { + case val.IsNull(): + return "null" + case !val.IsKnown(): + // Should never happen here because we should filter before we get + // in here, but we'll do something reasonable rather than panic. + return "(not yet known)" + case ty == cty.Bool: + if val.True() { + return "true" + } + return "false" + case ty == cty.Number: + bf := val.AsBigFloat() + return bf.Text('g', 10) + case ty == cty.String: + // Go string syntax is not exactly the same as HCL native string syntax, + // but we'll accept the minor edge-cases where this is different here + // for now, just to get something reasonable here. + return fmt.Sprintf("%q", val.AsString()) + case ty.IsCollectionType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + return "empty " + ty.FriendlyName() + case 1: + return ty.FriendlyName() + " with 1 element" + default: + return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) + } + case ty.IsObjectType(): + atys := ty.AttributeTypes() + l := len(atys) + switch l { + case 0: + return "object with no attributes" + case 1: + var name string + for k := range atys { + name = k + } + return fmt.Sprintf("object with 1 attribute %q", name) + default: + return fmt.Sprintf("object with %d attributes", l) + } + default: + return ty.FriendlyName() + } +} diff --git a/vendor/github.com/hashicorp/terraform/command/format/diff.go b/vendor/github.com/hashicorp/terraform/command/format/diff.go new file mode 100644 index 0000000000..9d6a9f1dec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/format/diff.go @@ -0,0 +1,1192 @@ +package format + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/objchange" + "github.com/hashicorp/terraform/states" +) + +// ResourceChange returns a string representation of a change to a particular +// resource, for inclusion in user-facing plan output. +// +// The resource schema must be provided along with the change so that the +// formatted change can reflect the configuration structure for the associated +// resource. +// +// If "color" is non-nil, it will be used to color the result. Otherwise, +// no color codes will be included. 
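+//
+// A hedged usage sketch (change, schema, and color are assumed to be already
+// in scope; the names are illustrative):
+//
+//     out := ResourceChange(change, false, schema, color)
+//     fmt.Print(out)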
+func ResourceChange( + change *plans.ResourceInstanceChangeSrc, + tainted bool, + schema *configschema.Block, + color *colorstring.Colorize, +) string { + addr := change.Addr + var buf bytes.Buffer + + if color == nil { + color = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + Reset: false, + } + } + + dispAddr := addr.String() + if change.DeposedKey != states.NotDeposed { + dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey) + } + + switch change.Action { + case plans.Create: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr))) + case plans.Read: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be read during apply\n # (config refers to values not yet known)", dispAddr))) + case plans.Update: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr))) + case plans.CreateThenDelete, plans.DeleteThenCreate: + if tainted { + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced", dispAddr))) + } else { + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced", dispAddr))) + } + case plans.Delete: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed", dispAddr))) + default: + // should never happen, since the above is exhaustive + buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) + } + buf.WriteString(color.Color("[reset]\n")) + + switch change.Action { + case plans.Create: + buf.WriteString(color.Color("[green] +[reset] ")) + case plans.Read: + buf.WriteString(color.Color("[cyan] <=[reset] ")) + case plans.Update: + buf.WriteString(color.Color("[yellow] ~[reset] ")) + case plans.DeleteThenCreate: + buf.WriteString(color.Color("[red]-[reset]/[green]+[reset] ")) + case plans.CreateThenDelete: + buf.WriteString(color.Color("[green]+[reset]/[red]-[reset] ")) + case plans.Delete: + buf.WriteString(color.Color("[red] -[reset] ")) + default: + buf.WriteString(color.Color("??? ")) + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + buf.WriteString(fmt.Sprintf( + "resource %q %q", + addr.Resource.Resource.Type, + addr.Resource.Resource.Name, + )) + case addrs.DataResourceMode: + buf.WriteString(fmt.Sprintf( + "data %q %q ", + addr.Resource.Resource.Type, + addr.Resource.Resource.Name, + )) + default: + // should never happen, since the above is exhaustive + buf.WriteString(addr.String()) + } + + buf.WriteString(" {") + + p := blockBodyDiffPrinter{ + buf: &buf, + color: color, + action: change.Action, + requiredReplace: change.RequiredReplace, + } + + // Most commonly-used resources have nested blocks that result in us + // going at least three traversals deep while we recurse here, so we'll + // start with that much capacity and then grow as needed for deeper + // structures. + path := make(cty.Path, 0, 3) + + changeV, err := change.Decode(schema.ImpliedType()) + if err != nil { + // Should never happen in here, since we've already been through + // loads of layers of encode/decode of the planned changes before now. + panic(fmt.Sprintf("failed to decode plan for %s while rendering diff: %s", addr, err)) + } + + // We currently have an opt-out that permits the legacy SDK to return values + // that defy our usual conventions around handling of nesting blocks. 
To
+ // keep the rendering code from needing to handle all of these, we'll
+ // normalize first.
+ // (Ideally we'd do this as part of the SDK opt-out implementation in core,
+ // but we've added it here for now to reduce risk of unexpected impacts
+ // on other code in core.)
+ changeV.Change.Before = objchange.NormalizeObjectFromLegacySDK(changeV.Change.Before, schema)
+ changeV.Change.After = objchange.NormalizeObjectFromLegacySDK(changeV.Change.After, schema)
+
+ bodyWritten := p.writeBlockBodyDiff(schema, changeV.Before, changeV.After, 6, path)
+ if bodyWritten {
+ buf.WriteString("\n")
+ buf.WriteString(strings.Repeat(" ", 4))
+ }
+ buf.WriteString("}\n")
+
+ return buf.String()
+}
+
+type blockBodyDiffPrinter struct {
+ buf *bytes.Buffer
+ color *colorstring.Colorize
+ action plans.Action
+ requiredReplace cty.PathSet
+}
+
+const forcesNewResourceCaption = " [red]# forces replacement[reset]"
+
+// writeBlockBodyDiff writes attribute or block differences
+// and returns true if any differences were found and written.
+func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) bool {
+ path = ctyEnsurePathCapacity(path, 1)
+
+ bodyWritten := false
+ blankBeforeBlocks := false
+ {
+ attrNames := make([]string, 0, len(schema.Attributes))
+ attrNameLen := 0
+ for name := range schema.Attributes {
+ oldVal := ctyGetAttrMaybeNull(old, name)
+ newVal := ctyGetAttrMaybeNull(new, name)
+ if oldVal.IsNull() && newVal.IsNull() {
+ // Skip attributes where both old and new values are null
+ // (we do this early here so that we'll do our value alignment
+ // based on the longest attribute name that has a change, rather
+ // than the longest attribute name in the full set.)
+ continue
+ }
+
+ attrNames = append(attrNames, name)
+ if len(name) > attrNameLen {
+ attrNameLen = len(name)
+ }
+ }
+ sort.Strings(attrNames)
+ if len(attrNames) > 0 {
+ blankBeforeBlocks = true
+ }
+
+ for _, name := range attrNames {
+ attrS := schema.Attributes[name]
+ oldVal := ctyGetAttrMaybeNull(old, name)
+ newVal := ctyGetAttrMaybeNull(new, name)
+
+ bodyWritten = true
+ p.writeAttrDiff(name, attrS, oldVal, newVal, attrNameLen, indent, path)
+ }
+ }
+
+ {
+ blockTypeNames := make([]string, 0, len(schema.BlockTypes))
+ for name := range schema.BlockTypes {
+ blockTypeNames = append(blockTypeNames, name)
+ }
+ sort.Strings(blockTypeNames)
+
+ for _, name := range blockTypeNames {
+ blockS := schema.BlockTypes[name]
+ oldVal := ctyGetAttrMaybeNull(old, name)
+ newVal := ctyGetAttrMaybeNull(new, name)
+
+ bodyWritten = true
+ p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path)
+
+ // Always include a blank line for any subsequent block types.
+ blankBeforeBlocks = true
+ }
+ }
+
+ return bodyWritten
+}
+
+func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) {
+ path = append(path, cty.GetAttrStep{Name: name})
+ p.buf.WriteString("\n")
+ p.buf.WriteString(strings.Repeat(" ", indent))
+ showJustNew := false
+ var action plans.Action
+ switch {
+ case old.IsNull():
+ action = plans.Create
+ showJustNew = true
+ case new.IsNull():
+ action = plans.Delete
+ case ctyEqualWithUnknown(old, new):
+ action = plans.NoOp
+ showJustNew = true
+ default:
+ action = plans.Update
+ }
+
+ p.writeActionSymbol(action)
+
+ p.buf.WriteString(p.color.Color("[bold]"))
+ p.buf.WriteString(name)
+ p.buf.WriteString(p.color.Color("[reset]"))
+ p.buf.WriteString(strings.Repeat(" ", nameLen-len(name)))
+ p.buf.WriteString(" = ")
+
+ if attrS.Sensitive {
+ p.buf.WriteString("(sensitive value)")
+ } else {
+ switch {
+ case showJustNew:
+ p.writeValue(new, action, indent+2)
+ if p.pathForcesNewResource(path) {
+ p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
+ }
+ default:
+ // We show new even if it is null to emphasize the fact
+ // that it is being unset, since otherwise it is easy to
+ // misunderstand that the value is still set to the old value.
+ p.writeValueDiff(old, new, indent+2, path)
+ }
+ }
+}
+
+func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) {
+ path = append(path, cty.GetAttrStep{Name: name})
+ if old.IsNull() && new.IsNull() {
+ // Nothing to do if both old and new are null
+ return
+ }
+
+ // Where old/new are collections representing a nesting mode other than
+ // NestingSingle, we assume the collection value can never be unknown
+ // since we always produce the container for the nested objects, even if
+ // the objects within are computed.
+
+ switch blockS.Nesting {
+ case configschema.NestingSingle, configschema.NestingGroup:
+ var action plans.Action
+ eqV := new.Equals(old)
+ switch {
+ case old.IsNull():
+ action = plans.Create
+ case new.IsNull():
+ action = plans.Delete
+ case !new.IsWhollyKnown() || !old.IsWhollyKnown():
+ // "old" should actually always be known due to our contract
+ // that old values must never be unknown, but we'll allow it
+ // anyway to be robust.
+ action = plans.Update
+ case !eqV.IsKnown() || !eqV.True():
+ action = plans.Update
+ }
+
+ if blankBefore {
+ p.buf.WriteRune('\n')
+ }
+ p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, path)
+ case configschema.NestingList:
+ // For the sake of handling nested blocks, we'll treat a null list
+ // the same as an empty list since the config language doesn't
+ // distinguish these anyway.
+ old = ctyNullBlockListAsEmpty(old)
+ new = ctyNullBlockListAsEmpty(new)
+
+ oldItems := ctyCollectionValues(old)
+ newItems := ctyCollectionValues(new)
+
+ // Here we intentionally preserve the index-based correspondence
+ // between old and new, rather than trying to detect insertions
+ // and removals in the list, because this more accurately reflects
+ // how Terraform Core and providers will understand the change,
+ // particularly when the nested block contains computed attributes
+ // that will themselves maintain correspondence by index.
+
+ // commonLen is the number of elements that exist in both lists, which
+ // will be presented as updates (~).
Any additional items in one + // of the lists will be presented as either creates (+) or deletes (-) + // depending on which list they belong to. + var commonLen int + switch { + case len(oldItems) < len(newItems): + commonLen = len(oldItems) + default: + commonLen = len(newItems) + } + + if blankBefore && (len(oldItems) > 0 || len(newItems) > 0) { + p.buf.WriteRune('\n') + } + + for i := 0; i < commonLen; i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + oldItem := oldItems[i] + newItem := newItems[i] + action := plans.Update + if oldItem.RawEquals(newItem) { + action = plans.NoOp + } + p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, path) + } + for i := commonLen; i < len(oldItems); i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + oldItem := oldItems[i] + newItem := cty.NullVal(oldItem.Type()) + p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, path) + } + for i := commonLen; i < len(newItems); i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + newItem := newItems[i] + oldItem := cty.NullVal(newItem.Type()) + p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, path) + } + case configschema.NestingSet: + // For the sake of handling nested blocks, we'll treat a null set + // the same as an empty set since the config language doesn't + // distinguish these anyway. + old = ctyNullBlockSetAsEmpty(old) + new = ctyNullBlockSetAsEmpty(new) + + oldItems := ctyCollectionValues(old) + newItems := ctyCollectionValues(new) + + if (len(oldItems) + len(newItems)) == 0 { + // Nothing to do if both sets are empty + return + } + + allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) + allItems = append(allItems, oldItems...) + allItems = append(allItems, newItems...) + all := cty.SetVal(allItems) + + if blankBefore { + p.buf.WriteRune('\n') + } + + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + var action plans.Action + var oldValue, newValue cty.Value + switch { + case !val.IsKnown(): + action = plans.Update + newValue = val + case !old.HasElement(val).True(): + action = plans.Create + oldValue = cty.NullVal(val.Type()) + newValue = val + case !new.HasElement(val).True(): + action = plans.Delete + oldValue = val + newValue = cty.NullVal(val.Type()) + default: + action = plans.NoOp + oldValue = val + newValue = val + } + path := append(path, cty.IndexStep{Key: val}) + p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, path) + } + + case configschema.NestingMap: + // For the sake of handling nested blocks, we'll treat a null map + // the same as an empty map since the config language doesn't + // distinguish these anyway. 
+ old = ctyNullBlockMapAsEmpty(old) + new = ctyNullBlockMapAsEmpty(new) + + oldItems := old.AsValueMap() + newItems := new.AsValueMap() + if (len(oldItems) + len(newItems)) == 0 { + // Nothing to do if both maps are empty + return + } + + allKeys := make(map[string]bool) + for k := range oldItems { + allKeys[k] = true + } + for k := range newItems { + allKeys[k] = true + } + allKeysOrder := make([]string, 0, len(allKeys)) + for k := range allKeys { + allKeysOrder = append(allKeysOrder, k) + } + sort.Strings(allKeysOrder) + + if blankBefore { + p.buf.WriteRune('\n') + } + + for _, k := range allKeysOrder { + var action plans.Action + oldValue := oldItems[k] + newValue := newItems[k] + switch { + case oldValue == cty.NilVal: + oldValue = cty.NullVal(newValue.Type()) + action = plans.Create + case newValue == cty.NilVal: + newValue = cty.NullVal(oldValue.Type()) + action = plans.Delete + case !newValue.RawEquals(oldValue): + action = plans.Update + default: + action = plans.NoOp + } + + path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) + p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, path) + } + } +} + +func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + p.writeActionSymbol(action) + + if label != nil { + fmt.Fprintf(p.buf, "%s %q {", name, *label) + } else { + fmt.Fprintf(p.buf, "%s {", name) + } + + if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + + bodyWritten := p.writeBlockBodyDiff(blockS, old, new, indent+4, path) + if bodyWritten { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + } + p.buf.WriteString("}") +} + +func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) { + if !val.IsKnown() { + p.buf.WriteString("(known after apply)") + return + } + if val.IsNull() { + p.buf.WriteString(p.color.Color("[dark_gray]null[reset]")) + return + } + + ty := val.Type() + + switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + { + // Special behavior for JSON strings containing array or object + src := []byte(val.AsString()) + ty, err := ctyjson.ImpliedType(src) + // check for the special case of "null", which decodes to nil, + // and just allow it to be printed out directly + if err == nil && !ty.IsPrimitiveType() && val.AsString() != "null" { + jv, err := ctyjson.Unmarshal(src, ty) + if err == nil { + p.buf.WriteString("jsonencode(") + if jv.LengthInt() == 0 { + p.writeValue(jv, action, 0) + } else { + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeValue(jv, action, indent+4) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteByte(')') + break // don't *also* do the normal behavior below + } + } + } + fmt.Fprintf(p.buf, "%q", val.AsString()) + case cty.Bool: + if val.True() { + p.buf.WriteString("true") + } else { + p.buf.WriteString("false") + } + case cty.Number: + bf := val.AsBigFloat() + p.buf.WriteString(bf.Text('f', -1)) + default: + // should never happen, since the above is exhaustive + fmt.Fprintf(p.buf, "%#v", val) + } + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + p.buf.WriteString("[") + + it := val.ElementIterator() + for it.Next() { + _, val 
:= it.Element() + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(val, action, indent+4) + p.buf.WriteString(",") + } + + if val.LengthInt() > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("]") + case ty.IsMapType(): + p.buf.WriteString("{") + + keyLen := 0 + for it := val.ElementIterator(); it.Next(); { + key, _ := it.Element() + if keyStr := key.AsString(); len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + for it := val.ElementIterator(); it.Next(); { + key, val := it.Element() + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(key, action, indent+4) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString()))) + p.buf.WriteString(" = ") + p.writeValue(val, action, indent+4) + } + + if val.LengthInt() > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("}") + case ty.IsObjectType(): + p.buf.WriteString("{") + + atys := ty.AttributeTypes() + attrNames := make([]string, 0, len(atys)) + nameLen := 0 + for attrName := range atys { + attrNames = append(attrNames, attrName) + if len(attrName) > nameLen { + nameLen = len(attrName) + } + } + sort.Strings(attrNames) + + for _, attrName := range attrNames { + val := val.GetAttr(attrName) + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.buf.WriteString(attrName) + p.buf.WriteString(strings.Repeat(" ", nameLen-len(attrName))) + p.buf.WriteString(" = ") + p.writeValue(val, action, indent+4) + } + + if len(attrNames) > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("}") + } +} + +func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) { + ty := old.Type() + typesEqual := ctyTypesEqual(ty, new.Type()) + + // We have some specialized diff implementations for certain complex + // values where it's useful to see a visualization of the diff of + // the nested elements rather than just showing the entire old and + // new values verbatim. + // However, these specialized implementations can apply only if both + // values are known and non-null. + if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual { + switch { + case ty == cty.String: + // We have special behavior for both multi-line strings in general + // and for strings that can parse as JSON. For the JSON handling + // to apply, both old and new must be valid JSON. + // For single-line strings that don't parse as JSON we just fall + // out of this switch block and do the default old -> new rendering. + oldS := old.AsString() + newS := new.AsString() + + { + // Special behavior for JSON strings containing object or + // list values. 
+ oldBytes := []byte(oldS)
+ newBytes := []byte(newS)
+ oldType, oldErr := ctyjson.ImpliedType(oldBytes)
+ newType, newErr := ctyjson.ImpliedType(newBytes)
+ if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) {
+ oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType)
+ newJV, newErr := ctyjson.Unmarshal(newBytes, newType)
+ if oldErr == nil && newErr == nil {
+ if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace
+ p.buf.WriteString("jsonencode(")
+ p.buf.WriteByte('\n')
+ p.buf.WriteString(strings.Repeat(" ", indent+2))
+ p.writeActionSymbol(plans.Update)
+ p.writeValueDiff(oldJV, newJV, indent+4, path)
+ p.buf.WriteByte('\n')
+ p.buf.WriteString(strings.Repeat(" ", indent))
+ p.buf.WriteByte(')')
+ } else {
+ // if they differ only in insignificant whitespace
+ // then we'll note that but still expand out the
+ // effective value.
+ if p.pathForcesNewResource(path) {
+ p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]"))
+ } else {
+ p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]"))
+ }
+ p.buf.WriteByte('\n')
+ p.buf.WriteString(strings.Repeat(" ", indent+4))
+ p.writeValue(oldJV, plans.NoOp, indent+4)
+ p.buf.WriteByte('\n')
+ p.buf.WriteString(strings.Repeat(" ", indent))
+ p.buf.WriteByte(')')
+ }
+ return
+ }
+ }
+ }
+
+ if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 {
+ break
+ }
+
+ p.buf.WriteString("<<~EOT")
+ if p.pathForcesNewResource(path) {
+ p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
+ }
+ p.buf.WriteString("\n")
+
+ var oldLines, newLines []cty.Value
+ {
+ r := strings.NewReader(oldS)
+ sc := bufio.NewScanner(r)
+ for sc.Scan() {
+ oldLines = append(oldLines, cty.StringVal(sc.Text()))
+ }
+ }
+ {
+ r := strings.NewReader(newS)
+ sc := bufio.NewScanner(r)
+ for sc.Scan() {
+ newLines = append(newLines, cty.StringVal(sc.Text()))
+ }
+ }
+
+ diffLines := ctySequenceDiff(oldLines, newLines)
+ for _, diffLine := range diffLines {
+ p.buf.WriteString(strings.Repeat(" ", indent+2))
+ p.writeActionSymbol(diffLine.Action)
+
+ switch diffLine.Action {
+ case plans.NoOp, plans.Delete:
+ p.buf.WriteString(diffLine.Before.AsString())
+ case plans.Create:
+ p.buf.WriteString(diffLine.After.AsString())
+ default:
+ // Should never happen since the above covers all
+ // actions that ctySequenceDiff can return for strings
+ p.buf.WriteString(diffLine.After.AsString())
+
+ }
+ p.buf.WriteString("\n")
+ }
+
+ p.buf.WriteString(strings.Repeat(" ", indent)) // +4 here because there's no symbol
+ p.buf.WriteString("EOT")
+
+ return
+
+ case ty.IsSetType():
+ p.buf.WriteString("[")
+ if p.pathForcesNewResource(path) {
+ p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
+ }
+ p.buf.WriteString("\n")
+
+ var addedVals, removedVals, allVals []cty.Value
+ for it := old.ElementIterator(); it.Next(); {
+ _, val := it.Element()
+ allVals = append(allVals, val)
+ if new.HasElement(val).False() {
+ removedVals = append(removedVals, val)
+ }
+ }
+ for it := new.ElementIterator(); it.Next(); {
+ _, val := it.Element()
+ allVals = append(allVals, val)
+ if val.IsKnown() && old.HasElement(val).False() {
+ addedVals = append(addedVals, val)
+ }
+ }
+
+ var all, added, removed cty.Value
+ if len(allVals) > 0 {
+ all = cty.SetVal(allVals)
+ } else {
+ all = cty.SetValEmpty(ty.ElementType())
+ }
+ if len(addedVals) > 0 {
+ added = cty.SetVal(addedVals)
+ } else {
+ added = cty.SetValEmpty(ty.ElementType())
+ }
+
if len(removedVals) > 0 { + removed = cty.SetVal(removedVals) + } else { + removed = cty.SetValEmpty(ty.ElementType()) + } + + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + + var action plans.Action + switch { + case !val.IsKnown(): + action = plans.Update + case added.HasElement(val).True(): + action = plans.Create + case removed.HasElement(val).True(): + action = plans.Delete + default: + action = plans.NoOp + } + + p.writeActionSymbol(action) + p.writeValue(val, action, indent+4) + p.buf.WriteString(",\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("]") + return + case ty.IsListType() || ty.IsTupleType(): + p.buf.WriteString("[") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice()) + for _, elemDiff := range elemDiffs { + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(elemDiff.Action) + switch elemDiff.Action { + case plans.NoOp, plans.Delete: + p.writeValue(elemDiff.Before, elemDiff.Action, indent+4) + case plans.Update: + p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path) + case plans.Create: + p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + default: + // Should never happen since the above covers all + // actions that ctySequenceDiff can return. + p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + } + + p.buf.WriteString(",\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("]") + return + + case ty.IsMapType(): + p.buf.WriteString("{") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var allKeys []string + keyLen := 0 + for it := old.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + for it := new.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + sort.Strings(allKeys) + + lastK := "" + for i, k := range allKeys { + if i > 0 && lastK == k { + continue // skip duplicates (list is sorted) + } + lastK = k + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + kV := cty.StringVal(k) + var action plans.Action + if old.HasIndex(kV).False() { + action = plans.Create + } else if new.HasIndex(kV).False() { + action = plans.Delete + } else if eqV := old.Index(kV).Equals(new.Index(kV)); eqV.IsKnown() && eqV.True() { + action = plans.NoOp + } else { + action = plans.Update + } + + path := append(path, cty.IndexStep{Key: kV}) + + p.writeActionSymbol(action) + p.writeValue(kV, action, indent+4) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) + p.buf.WriteString(" = ") + switch action { + case plans.Create, plans.NoOp: + v := new.Index(kV) + p.writeValue(v, action, indent+4) + case plans.Delete: + oldV := old.Index(kV) + newV := cty.NullVal(oldV.Type()) + p.writeValueDiff(oldV, newV, indent+4, path) + default: + oldV := old.Index(kV) + newV := new.Index(kV) + p.writeValueDiff(oldV, newV, indent+4, path) + } + + p.buf.WriteByte('\n') + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("}") + return + case ty.IsObjectType(): + p.buf.WriteString("{") + p.buf.WriteString("\n") + + forcesNewResource := 
p.pathForcesNewResource(path)
+
+ var allKeys []string
+ keyLen := 0
+ for it := old.ElementIterator(); it.Next(); {
+ k, _ := it.Element()
+ keyStr := k.AsString()
+ allKeys = append(allKeys, keyStr)
+ if len(keyStr) > keyLen {
+ keyLen = len(keyStr)
+ }
+ }
+ for it := new.ElementIterator(); it.Next(); {
+ k, _ := it.Element()
+ keyStr := k.AsString()
+ allKeys = append(allKeys, keyStr)
+ if len(keyStr) > keyLen {
+ keyLen = len(keyStr)
+ }
+ }
+
+ sort.Strings(allKeys)
+
+ lastK := ""
+ for i, k := range allKeys {
+ if i > 0 && lastK == k {
+ continue // skip duplicates (list is sorted)
+ }
+ lastK = k
+
+ p.buf.WriteString(strings.Repeat(" ", indent+2))
+ kV := k
+ var action plans.Action
+ if !old.Type().HasAttribute(kV) {
+ action = plans.Create
+ } else if !new.Type().HasAttribute(kV) {
+ action = plans.Delete
+ } else if eqV := old.GetAttr(kV).Equals(new.GetAttr(kV)); eqV.IsKnown() && eqV.True() {
+ action = plans.NoOp
+ } else {
+ action = plans.Update
+ }
+
+ path := append(path, cty.GetAttrStep{Name: kV})
+
+ p.writeActionSymbol(action)
+ p.buf.WriteString(k)
+ p.buf.WriteString(strings.Repeat(" ", keyLen-len(k)))
+ p.buf.WriteString(" = ")
+
+ switch action {
+ case plans.Create, plans.NoOp:
+ v := new.GetAttr(kV)
+ p.writeValue(v, action, indent+4)
+ case plans.Delete:
+ oldV := old.GetAttr(kV)
+ newV := cty.NullVal(oldV.Type())
+ p.writeValueDiff(oldV, newV, indent+4, path)
+ default:
+ oldV := old.GetAttr(kV)
+ newV := new.GetAttr(kV)
+ p.writeValueDiff(oldV, newV, indent+4, path)
+ }
+
+ p.buf.WriteString("\n")
+ }
+
+ p.buf.WriteString(strings.Repeat(" ", indent))
+ p.buf.WriteString("}")
+
+ if forcesNewResource {
+ p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
+ }
+ return
+ }
+ }
+
+ // In all other cases, we just show the new and old values as-is
+ p.writeValue(old, plans.Delete, indent)
+ if new.IsNull() {
+ p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] "))
+ } else {
+ p.buf.WriteString(p.color.Color(" [yellow]->[reset] "))
+ }
+
+ p.writeValue(new, plans.Create, indent)
+ if p.pathForcesNewResource(path) {
+ p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
+ }
+}
+
+// writeActionSymbol writes a symbol to represent the given action, followed
+// by a space.
+//
+// It only supports the actions that can be represented with a single character:
+// Create, Delete, Update and NoOp.
+func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) {
+ switch action {
+ case plans.Create:
+ p.buf.WriteString(p.color.Color("[green]+[reset] "))
+ case plans.Delete:
+ p.buf.WriteString(p.color.Color("[red]-[reset] "))
+ case plans.Update:
+ p.buf.WriteString(p.color.Color("[yellow]~[reset] "))
+ case plans.NoOp:
+ p.buf.WriteString("  ")
+ default:
+ // Should never happen
+ p.buf.WriteString(p.color.Color("? "))
+ }
+}
+
+func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool {
+ if !p.action.IsReplace() {
+ // "requiredReplace" only applies when the instance is being replaced
+ return false
+ }
+ return p.requiredReplace.Has(path)
+}
+
+func ctyEmptyString(value cty.Value) bool {
+ if !value.IsNull() && value.IsKnown() {
+ valueType := value.Type()
+ if valueType == cty.String && value.AsString() == "" {
+ return true
+ }
+ }
+ return false
+}
+
+func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value {
+ attrType := val.Type().AttributeType(name)
+
+ if val.IsNull() {
+ return cty.NullVal(attrType)
+ }
+
+ // We treat "" as null here
+ // as the existing SDK doesn't support null yet.
+ // This allows us to avoid spurious diffs + // until we introduce null to the SDK. + attrValue := val.GetAttr(name) + if ctyEmptyString(attrValue) { + return cty.NullVal(attrType) + } + + return attrValue +} + +func ctyCollectionValues(val cty.Value) []cty.Value { + if !val.IsKnown() || val.IsNull() { + return nil + } + + ret := make([]cty.Value, 0, val.LengthInt()) + for it := val.ElementIterator(); it.Next(); { + _, value := it.Element() + ret = append(ret, value) + } + return ret +} + +// ctySequenceDiff returns differences between given sequences of cty.Value(s) +// in the form of Create, Delete, or Update actions (for objects). +func ctySequenceDiff(old, new []cty.Value) []*plans.Change { + var ret []*plans.Change + lcs := objchange.LongestCommonSubsequence(old, new) + var oldI, newI, lcsI int + for oldI < len(old) || newI < len(new) || lcsI < len(lcs) { + for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) { + isObjectDiff := old[oldI].Type().IsObjectType() && (newI >= len(new) || new[newI].Type().IsObjectType()) + if isObjectDiff && newI < len(new) { + ret = append(ret, &plans.Change{ + Action: plans.Update, + Before: old[oldI], + After: new[newI], + }) + oldI++ + newI++ // we also consume the next "new" in this case + continue + } + + ret = append(ret, &plans.Change{ + Action: plans.Delete, + Before: old[oldI], + After: cty.NullVal(old[oldI].Type()), + }) + oldI++ + } + for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) { + ret = append(ret, &plans.Change{ + Action: plans.Create, + Before: cty.NullVal(new[newI].Type()), + After: new[newI], + }) + newI++ + } + if lcsI < len(lcs) { + ret = append(ret, &plans.Change{ + Action: plans.NoOp, + Before: new[newI], + After: new[newI], + }) + + // All of our indexes advance together now, since the line + // is common to all three sequences. + lcsI++ + oldI++ + newI++ + } + } + return ret +} + +func ctyEqualWithUnknown(old, new cty.Value) bool { + if !old.IsWhollyKnown() || !new.IsWhollyKnown() { + return false + } + return old.Equals(new).True() +} + +// ctyTypesEqual checks equality of two types more loosely +// by avoiding checks of object/tuple elements +// as we render differences on element-by-element basis anyway +func ctyTypesEqual(oldT, newT cty.Type) bool { + if oldT.IsObjectType() && newT.IsObjectType() { + return true + } + if oldT.IsTupleType() && newT.IsTupleType() { + return true + } + return oldT.Equals(newT) +} + +func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path { + if cap(path)-len(path) >= minExtra { + return path + } + newCap := cap(path) * 2 + if newCap < (len(path) + minExtra) { + newCap = len(path) + minExtra + } + newPath := make(cty.Path, len(path), newCap) + copy(newPath, path) + return newPath +} + +// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. +// +// In particular, this function handles the special situation where a "list" is +// actually represented as a tuple type where nested blocks contain +// dynamically-typed values. +func ctyNullBlockListAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + if ty := in.Type(); ty.IsListType() { + return cty.ListValEmpty(ty.ElementType()) + } + return cty.EmptyTupleVal // must need a tuple, then +} + +// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. 
+//
+// In particular, this function handles the special situation where a "map" is
+// actually represented as an object type where nested blocks contain
+// dynamically-typed values.
+func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value {
+ if !in.IsNull() {
+ return in
+ }
+ if ty := in.Type(); ty.IsMapType() {
+ return cty.MapValEmpty(ty.ElementType())
+ }
+ return cty.EmptyObjectVal // must need an object, then
+}
+
+// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil
+// or returns an empty value of a suitable type to serve as a placeholder for it.
+func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value {
+ if !in.IsNull() {
+ return in
+ }
+ // Dynamically-typed attributes are not supported inside blocks backed by
+ // sets, so our result here is always a set.
+ return cty.SetValEmpty(in.Type().ElementType())
+}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/format.go b/vendor/github.com/hashicorp/terraform/command/format/format.go
new file mode 100644
index 0000000000..aa8d7deb2a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/format.go
@@ -0,0 +1,8 @@
+// Package format contains helpers for formatting various Terraform
+// structures for human-readable output.
+//
+// This package is used by the official Terraform CLI in formatting any
+// output and is exported to encourage non-official frontends to mimic the
+// output formatting as much as possible so that text formats of Terraform
+// structures have a consistent look and feel.
+package format
diff --git a/vendor/github.com/hashicorp/terraform/command/format/object_id.go b/vendor/github.com/hashicorp/terraform/command/format/object_id.go
new file mode 100644
index 0000000000..85ebbfec5e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/object_id.go
@@ -0,0 +1,123 @@
+package format
+
+import (
+ "github.com/zclconf/go-cty/cty"
+)
+
+// ObjectValueID takes a value that is assumed to be an object representation
+// of some resource instance object and attempts to heuristically find an
+// attribute of it that is likely to be a unique identifier in the remote
+// system that it belongs to which will be useful to the user.
+//
+// If such an attribute is found, its name and string value intended for
+// display are returned. Both returned strings are empty if no such attribute
+// exists, in which case the caller should assume that the resource instance
+// address within the Terraform configuration is the best available identifier.
+//
+// This is only a best-effort sort of thing, relying on naming conventions in
+// our resource type schemas. The result is not guaranteed to be unique, but
+// should generally be suitable for display to an end-user anyway.
+//
+// This function will panic if the given value is not of an object type.
+func ObjectValueID(obj cty.Value) (k, v string) {
+ if obj.IsNull() || !obj.IsKnown() {
+ return "", ""
+ }
+
+ atys := obj.Type().AttributeTypes()
+
+ switch {
+
+ case atys["id"] == cty.String:
+ v := obj.GetAttr("id")
+ if v.IsKnown() && !v.IsNull() {
+ return "id", v.AsString()
+ }
+
+ case atys["name"] == cty.String:
+ // "name" isn't always globally unique, but if there isn't also an
+ // "id" then it _often_ is, in practice.
+ v := obj.GetAttr("name") + if v.IsKnown() && !v.IsNull() { + return "name", v.AsString() + } + } + + return "", "" +} + +// ObjectValueName takes a value that is assumed to be an object representation +// of some resource instance object and attempts to heuristically find an +// attribute of it that is likely to be a human-friendly name in the remote +// system that it belongs to which will be useful to the user. +// +// If such an attribute is found, its name and string value intended for +// display are returned. Both returned strings are empty if no such attribute +// exists, in which case the caller should assume that the resource instance +// address within the Terraform configuration is the best available identifier. +// +// This is only a best-effort sort of thing, relying on naming conventions in +// our resource type schemas. The result is not guaranteed to be unique, but +// should generally be suitable for display to an end-user anyway. +// +// Callers that use both ObjectValueName and ObjectValueID at the same time +// should be prepared to get the same attribute key and value from both in +// some cases, since there is overlap betweek the id-extraction and +// name-extraction heuristics. +// +// This function will panic if the given value is not of an object type. +func ObjectValueName(obj cty.Value) (k, v string) { + if obj.IsNull() || !obj.IsKnown() { + return "", "" + } + + atys := obj.Type().AttributeTypes() + + switch { + + case atys["name"] == cty.String: + v := obj.GetAttr("name") + if v.IsKnown() && !v.IsNull() { + return "name", v.AsString() + } + + case atys["tags"].IsMapType() && atys["tags"].ElementType() == cty.String: + tags := obj.GetAttr("tags") + if tags.IsNull() || !tags.IsWhollyKnown() { + break + } + + switch { + case tags.HasIndex(cty.StringVal("name")).RawEquals(cty.True): + v := tags.Index(cty.StringVal("name")) + if v.IsKnown() && !v.IsNull() { + return "tags.name", v.AsString() + } + case tags.HasIndex(cty.StringVal("Name")).RawEquals(cty.True): + // AWS-style naming convention + v := tags.Index(cty.StringVal("Name")) + if v.IsKnown() && !v.IsNull() { + return "tags.Name", v.AsString() + } + } + } + + return "", "" +} + +// ObjectValueIDOrName is a convenience wrapper around both ObjectValueID +// and ObjectValueName (in that preference order) to try to extract some sort +// of human-friendly descriptive string value for an object as additional +// context about an object when it is being displayed in a compact way (where +// not all of the attributes are visible.) +// +// Just as with the two functions it wraps, it is a best-effort and may return +// two empty strings if no suitable attribute can be found for a given object. 
+func ObjectValueIDOrName(obj cty.Value) (k, v string) {
+ k, v = ObjectValueID(obj)
+ if k != "" {
+ return
+ }
+ k, v = ObjectValueName(obj)
+ return
+}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/plan.go b/vendor/github.com/hashicorp/terraform/command/format/plan.go
new file mode 100644
index 0000000000..098653fcf6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/plan.go
@@ -0,0 +1,302 @@
+package format
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "sort"
+ "strings"
+
+ "github.com/mitchellh/colorstring"
+
+ "github.com/hashicorp/terraform/addrs"
+ "github.com/hashicorp/terraform/plans"
+ "github.com/hashicorp/terraform/states"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Plan is a representation of a plan optimized for display to
+// an end-user, as opposed to terraform.Plan which is for internal use.
+//
+// Plan excludes implementation details that may otherwise appear
+// in the main plan, such as destroy actions on data sources (which are
+// there only to clean up the state).
+type Plan struct {
+ Resources []*InstanceDiff
+}
+
+// InstanceDiff is a representation of an instance diff optimized
+// for display, in conjunction with Plan.
+type InstanceDiff struct {
+ Addr *terraform.ResourceAddress
+ Action plans.Action
+
+ // Attributes describes changes to the attributes of the instance.
+ //
+ // For destroy diffs this is always nil.
+ Attributes []*AttributeDiff
+
+ Tainted bool
+ Deposed bool
+}
+
+// AttributeDiff is a representation of an attribute diff optimized
+// for display, in conjunction with InstanceDiff.
+type AttributeDiff struct {
+ // Path is a dot-delimited traversal through possibly many levels of list and map structure,
+ // intended for display purposes only.
+ Path string
+
+ Action plans.Action
+
+ OldValue string
+ NewValue string
+
+ NewComputed bool
+ Sensitive bool
+ ForcesNew bool
+}
+
+// PlanStats gives summary counts for a Plan.
+type PlanStats struct {
+ ToAdd, ToChange, ToDestroy int
+}
+
+// NewPlan produces a display-oriented Plan from a plans.Changes.
+func NewPlan(changes *plans.Changes) *Plan {
+ log.Printf("[TRACE] NewPlan for %#v", changes)
+ ret := &Plan{}
+ if changes == nil {
+ // Nothing to do!
+ return ret
+ }
+
+ for _, rc := range changes.Resources {
+ addr := rc.Addr
+ log.Printf("[TRACE] NewPlan found %s (%s)", addr, rc.Action)
+ dataSource := addr.Resource.Resource.Mode == addrs.DataResourceMode
+
+ // We create "delete" actions for data resources so we can clean
+ // up their entries in state, but this is an implementation detail
+ // that users shouldn't see.
+ if dataSource && rc.Action == plans.Delete {
+ continue
+ }
+
+ // For now we'll shim this to work with our old types.
+ // TODO: Update for the new plan types, ideally also switching over to
+ // a structural diff renderer instead of a flat renderer.
+ did := &InstanceDiff{
+ Addr: terraform.NewLegacyResourceInstanceAddress(addr),
+ Action: rc.Action,
+ }
+
+ if rc.DeposedKey != states.NotDeposed {
+ did.Deposed = true
+ }
+
+ // Since this is just a temporary stub implementation on the way
+ // to us replacing this with the structural diff renderer, we currently
+ // don't include any attributes here.
+ // FIXME: Implement the structural diff renderer to replace this
+ // codepath altogether.
+
+ ret.Resources = append(ret.Resources, did)
+ }
+
+ // Sort the instance diffs by their addresses for display.
+ sort.Slice(ret.Resources, func(i, j int) bool {
+ iAddr := ret.Resources[i].Addr
+ jAddr := ret.Resources[j].Addr
+ return iAddr.Less(jAddr)
+ })
+
+ return ret
+}
+
+// Format produces and returns a text representation of the receiving plan
+// intended for display in a terminal.
+//
+// If color is not nil, it is used to colorize the output.
+func (p *Plan) Format(color *colorstring.Colorize) string {
+ if p.Empty() {
+ return "This plan does nothing."
+ }
+
+ if color == nil {
+ color = &colorstring.Colorize{
+ Colors: colorstring.DefaultColors,
+ Reset: false,
+ }
+ }
+
+ // Find the longest path length of all the paths that are changing,
+ // so we can align them all.
+ keyLen := 0
+ for _, r := range p.Resources {
+ for _, attr := range r.Attributes {
+ key := attr.Path
+
+ if len(key) > keyLen {
+ keyLen = len(key)
+ }
+ }
+ }
+
+ buf := new(bytes.Buffer)
+ for _, r := range p.Resources {
+ formatPlanInstanceDiff(buf, r, keyLen, color)
+ }
+
+ return strings.TrimSpace(buf.String())
+}
+
+// Stats returns statistics about the plan
+func (p *Plan) Stats() PlanStats {
+ var ret PlanStats
+ for _, r := range p.Resources {
+ switch r.Action {
+ case plans.Create:
+ ret.ToAdd++
+ case plans.Update:
+ ret.ToChange++
+ case plans.DeleteThenCreate, plans.CreateThenDelete:
+ ret.ToAdd++
+ ret.ToDestroy++
+ case plans.Delete:
+ ret.ToDestroy++
+ }
+ }
+ return ret
+}
+
+// ActionCounts returns the number of diffs for each action type
+func (p *Plan) ActionCounts() map[plans.Action]int {
+ ret := map[plans.Action]int{}
+ for _, r := range p.Resources {
+ ret[r.Action]++
+ }
+ return ret
+}
+
+// Empty returns true if there are no resource diffs in the receiving plan.
+func (p *Plan) Empty() bool {
+ return len(p.Resources) == 0
+}
+
+// DiffActionSymbol returns a string that, once passed through a
+// colorstring.Colorize, will produce a result that can be written
+// to a terminal to produce a symbol made of three printable
+// characters, possibly interspersed with VT100 color codes.
+func DiffActionSymbol(action plans.Action) string {
+ switch action {
+ case plans.DeleteThenCreate:
+ return "[red]-[reset]/[green]+[reset]"
+ case plans.CreateThenDelete:
+ return "[green]+[reset]/[red]-[reset]"
+ case plans.Create:
+ return " [green]+[reset]"
+ case plans.Delete:
+ return " [red]-[reset]"
+ case plans.Read:
+ return " [cyan]<=[reset]"
+ case plans.Update:
+ return " [yellow]~[reset]"
+ default:
+ return " ?"
+ }
+}
+
+// formatPlanInstanceDiff writes the text representation of the given instance diff
+// to the given buffer, using the given colorizer.
+func formatPlanInstanceDiff(buf *bytes.Buffer, r *InstanceDiff, keyLen int, colorizer *colorstring.Colorize) {
+ addrStr := r.Addr.String()
+
+ // Determine the color for the text (green for adding, yellow
+ // for change, red for delete), and symbol, and output the
+ // resource header.
+ color := "yellow" + symbol := DiffActionSymbol(r.Action) + oldValues := true + switch r.Action { + case plans.DeleteThenCreate, plans.CreateThenDelete: + color = "yellow" + case plans.Create: + color = "green" + oldValues = false + case plans.Delete: + color = "red" + case plans.Read: + color = "cyan" + oldValues = false + } + + var extraStr string + if r.Tainted { + extraStr = extraStr + " (tainted)" + } + if r.Deposed { + extraStr = extraStr + " (deposed)" + } + if r.Action.IsReplace() { + extraStr = extraStr + colorizer.Color(" [red][bold](new resource required)") + } + + buf.WriteString( + colorizer.Color(fmt.Sprintf( + "[%s]%s [%s]%s%s\n", + color, symbol, color, addrStr, extraStr, + )), + ) + + for _, attr := range r.Attributes { + + v := attr.NewValue + var dispV string + switch { + case v == "" && attr.NewComputed: + dispV = "" + case attr.Sensitive: + dispV = "" + default: + dispV = fmt.Sprintf("%q", v) + } + + updateMsg := "" + switch { + case attr.ForcesNew && r.Action.IsReplace(): + updateMsg = colorizer.Color(" [red](forces new resource)") + case attr.Sensitive && oldValues: + updateMsg = colorizer.Color(" [yellow](attribute changed)") + } + + if oldValues { + u := attr.OldValue + var dispU string + switch { + case attr.Sensitive: + dispU = "" + default: + dispU = fmt.Sprintf("%q", u) + } + buf.WriteString(fmt.Sprintf( + " %s:%s %s => %s%s\n", + attr.Path, + strings.Repeat(" ", keyLen-len(attr.Path)), + dispU, dispV, + updateMsg, + )) + } else { + buf.WriteString(fmt.Sprintf( + " %s:%s %s%s\n", + attr.Path, + strings.Repeat(" ", keyLen-len(attr.Path)), + dispV, + updateMsg, + )) + } + } + + // Write the reset color so we don't bleed color into later text + buf.WriteString(colorizer.Color("[reset]\n")) +} diff --git a/vendor/github.com/hashicorp/terraform/command/format/state.go b/vendor/github.com/hashicorp/terraform/command/format/state.go new file mode 100644 index 0000000000..f411ef9c61 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/format/state.go @@ -0,0 +1,286 @@ +package format + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/colorstring" +) + +// StateOpts are the options for formatting a state. +type StateOpts struct { + // State is the state to format. This is required. + State *states.State + + // Schemas are used to decode attributes. This is required. + Schemas *terraform.Schemas + + // Color is the colorizer. This is optional. + Color *colorstring.Colorize +} + +// State takes a state and returns a string +func State(opts *StateOpts) string { + if opts.Color == nil { + panic("colorize not given") + } + + if opts.Schemas == nil { + panic("schemas not given") + } + + s := opts.State + if len(s.Modules) == 0 { + return "The state file is empty. No resources are represented." 
+ } + + buf := bytes.NewBufferString("[reset]") + p := blockBodyDiffPrinter{ + buf: buf, + color: opts.Color, + action: plans.NoOp, + } + + // Format all the modules + for _, m := range s.Modules { + formatStateModule(p, m, opts.Schemas) + } + + // Write the outputs for the root module + m := s.RootModule() + + if m.OutputValues != nil { + if len(m.OutputValues) > 0 { + p.buf.WriteString("Outputs:\n\n") + } + + // Sort the outputs + ks := make([]string, 0, len(m.OutputValues)) + for k := range m.OutputValues { + ks = append(ks, k) + } + sort.Strings(ks) + + // Output each output k/v pair + for _, k := range ks { + v := m.OutputValues[k] + p.buf.WriteString(fmt.Sprintf("%s = ", k)) + p.writeValue(v.Value, plans.NoOp, 0) + p.buf.WriteString("\n\n") + } + } + + return opts.Color.Color(strings.TrimSpace(p.buf.String())) + +} + +func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) { + // First get the names of all the resources so we can show them + // in alphabetical order. + names := make([]string, 0, len(m.Resources)) + for name := range m.Resources { + names = append(names, name) + } + sort.Strings(names) + + // Go through each resource and begin building up the output. + for _, key := range names { + for k, v := range m.Resources[key].Instances { + addr := m.Resources[key].Addr + + taintStr := "" + if v.Current.Status == 'T' { + taintStr = "(tainted)" + } + p.buf.WriteString(fmt.Sprintf("# %s: %s\n", addr.Absolute(m.Addr).Instance(k), taintStr)) + + var schema *configschema.Block + provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact() + if _, exists := schemas.Providers[provider]; !exists { + // This should never happen in normal use because we should've + // loaded all of the schemas and checked things prior to this + // point. We can't return errors here, but since this is UI code + // we will try to do _something_ reasonable. 
+ p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider)) + continue + } + + switch addr.Mode { + case addrs.ManagedResourceMode: + schema, _ = schemas.ResourceTypeConfig( + provider, + addr.Mode, + addr.Type, + ) + if schema == nil { + p.buf.WriteString(fmt.Sprintf( + "# missing schema for provider %q resource type %s\n\n", provider, addr.Type)) + continue + } + + p.buf.WriteString(fmt.Sprintf( + "resource %q %q {", + addr.Type, + addr.Name, + )) + case addrs.DataResourceMode: + schema, _ = schemas.ResourceTypeConfig( + provider, + addr.Mode, + addr.Type, + ) + if schema == nil { + p.buf.WriteString(fmt.Sprintf( + "# missing schema for provider %q data source %s\n\n", provider, addr.Type)) + continue + } + + p.buf.WriteString(fmt.Sprintf( + "data %q %q {", + addr.Type, + addr.Name, + )) + default: + // should never happen, since the above is exhaustive + p.buf.WriteString(addr.String()) + } + + val, err := v.Current.Decode(schema.ImpliedType()) + if err != nil { + fmt.Println(err.Error()) + break + } + + path := make(cty.Path, 0, 3) + bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path) + if bodyWritten { + p.buf.WriteString("\n") + } + + p.buf.WriteString("}\n\n") + } + } + p.buf.WriteString("[reset]\n") +} + +func formatNestedList(indent string, outputList []interface{}) string { + outputBuf := new(bytes.Buffer) + outputBuf.WriteString(fmt.Sprintf("%s[", indent)) + + lastIdx := len(outputList) - 1 + + for i, value := range outputList { + outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, " ", value)) + if i != lastIdx { + outputBuf.WriteString(",") + } + } + + outputBuf.WriteString(fmt.Sprintf("\n%s]", indent)) + return strings.TrimPrefix(outputBuf.String(), "\n") +} + +func formatListOutput(indent, outputName string, outputList []interface{}) string { + keyIndent := "" + + outputBuf := new(bytes.Buffer) + + if outputName != "" { + outputBuf.WriteString(fmt.Sprintf("%s%s = [", indent, outputName)) + keyIndent = " " + } + + lastIdx := len(outputList) - 1 + + for i, value := range outputList { + switch typedValue := value.(type) { + case string: + outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, keyIndent, value)) + case []interface{}: + outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent, + formatNestedList(indent+keyIndent, typedValue))) + case map[string]interface{}: + outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent, + formatNestedMap(indent+keyIndent, typedValue))) + } + + if lastIdx != i { + outputBuf.WriteString(",") + } + } + + if outputName != "" { + if len(outputList) > 0 { + outputBuf.WriteString(fmt.Sprintf("\n%s]", indent)) + } else { + outputBuf.WriteString("]") + } + } + + return strings.TrimPrefix(outputBuf.String(), "\n") +} + +func formatNestedMap(indent string, outputMap map[string]interface{}) string { + ks := make([]string, 0, len(outputMap)) + for k, _ := range outputMap { + ks = append(ks, k) + } + sort.Strings(ks) + + outputBuf := new(bytes.Buffer) + outputBuf.WriteString(fmt.Sprintf("%s{", indent)) + + lastIdx := len(outputMap) - 1 + for i, k := range ks { + v := outputMap[k] + outputBuf.WriteString(fmt.Sprintf("\n%s%s = %v", indent+" ", k, v)) + + if lastIdx != i { + outputBuf.WriteString(",") + } + } + + outputBuf.WriteString(fmt.Sprintf("\n%s}", indent)) + + return strings.TrimPrefix(outputBuf.String(), "\n") +} + +func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string { + ks := make([]string, 0, len(outputMap)) + for k, _ := range outputMap { + ks = append(ks, k) + } + 
sort.Strings(ks) + + keyIndent := "" + + outputBuf := new(bytes.Buffer) + if outputName != "" { + outputBuf.WriteString(fmt.Sprintf("%s%s = {", indent, outputName)) + keyIndent = " " + } + + for _, k := range ks { + v := outputMap[k] + outputBuf.WriteString(fmt.Sprintf("\n%s%s%s = %v", indent, keyIndent, k, v)) + } + + if outputName != "" { + if len(outputMap) > 0 { + outputBuf.WriteString(fmt.Sprintf("\n%s}", indent)) + } else { + outputBuf.WriteString("}") + } + } + + return strings.TrimPrefix(outputBuf.String(), "\n") +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go deleted file mode 100644 index 2b1b0cacbf..0000000000 --- a/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go +++ /dev/null @@ -1,97 +0,0 @@ -package configschema - -import ( - "github.com/hashicorp/hcl2/hcldec" - "github.com/zclconf/go-cty/cty" -) - -var mapLabelNames = []string{"key"} - -// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body -// using the facilities in the hcldec package. -// -// The returned specification is guaranteed to return a value of the same type -// returned by method ImpliedType, but it may contain null or unknown values if -// any of the block attributes are defined as optional and/or computed -// respectively. -func (b *Block) DecoderSpec() hcldec.Spec { - ret := hcldec.ObjectSpec{} - if b == nil { - return ret - } - - for name, attrS := range b.Attributes { - switch { - case attrS.Computed && attrS.Optional: - // In this special case we use an unknown value as a default - // to get the intended behavior that the result is computed - // unless it has been explicitly set in config. - ret[name] = &hcldec.DefaultSpec{ - Primary: &hcldec.AttrSpec{ - Name: name, - Type: attrS.Type, - }, - Default: &hcldec.LiteralSpec{ - Value: cty.UnknownVal(attrS.Type), - }, - } - case attrS.Computed: - ret[name] = &hcldec.LiteralSpec{ - Value: cty.UnknownVal(attrS.Type), - } - default: - ret[name] = &hcldec.AttrSpec{ - Name: name, - Type: attrS.Type, - Required: attrS.Required, - } - } - } - - for name, blockS := range b.BlockTypes { - if _, exists := ret[name]; exists { - // This indicates an invalid schema, since it's not valid to - // define both an attribute and a block type of the same name. - // However, we don't raise this here since it's checked by - // InternalValidate. - continue - } - - childSpec := blockS.Block.DecoderSpec() - - switch blockS.Nesting { - case NestingSingle: - ret[name] = &hcldec.BlockSpec{ - TypeName: name, - Nested: childSpec, - Required: blockS.MinItems == 1 && blockS.MaxItems >= 1, - } - case NestingList: - ret[name] = &hcldec.BlockListSpec{ - TypeName: name, - Nested: childSpec, - MinItems: blockS.MinItems, - MaxItems: blockS.MaxItems, - } - case NestingSet: - ret[name] = &hcldec.BlockSetSpec{ - TypeName: name, - Nested: childSpec, - MinItems: blockS.MinItems, - MaxItems: blockS.MaxItems, - } - case NestingMap: - ret[name] = &hcldec.BlockMapSpec{ - TypeName: name, - Nested: childSpec, - LabelNames: mapLabelNames, - } - default: - // Invalid nesting type is just ignored. It's checked by - // InternalValidate. 
- continue - } - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go deleted file mode 100644 index 67324ebce1..0000000000 --- a/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package configschema - -import ( - "github.com/hashicorp/hcl2/hcldec" - "github.com/zclconf/go-cty/cty" -) - -// ImpliedType returns the cty.Type that would result from decoding a -// configuration block using the receiving block schema. -// -// ImpliedType always returns a result, even if the given schema is -// inconsistent. Code that creates configschema.Block objects should be -// tested using the InternalValidate method to detect any inconsistencies -// that would cause this method to fall back on defaults and assumptions. -func (b *Block) ImpliedType() cty.Type { - if b == nil { - return cty.EmptyObject - } - - return hcldec.ImpliedType(b.DecoderSpec()) -} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go deleted file mode 100644 index 6cb9313e26..0000000000 --- a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=NestingMode"; DO NOT EDIT. - -package configschema - -import "strconv" - -const _NestingMode_name = "nestingModeInvalidNestingSingleNestingListNestingSetNestingMap" - -var _NestingMode_index = [...]uint8{0, 18, 31, 42, 52, 62} - -func (i NestingMode) String() string { - if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { - return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go new file mode 100644 index 0000000000..bb4228d98c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go @@ -0,0 +1,424 @@ +package hcl2shim + +import ( + "fmt" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty/convert" + + "github.com/zclconf/go-cty/cty" +) + +// FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a map compatible with what would be +// produced by the "flatmap" package. +// +// The type of the given value informs the structure of the resulting map. +// The value must be of an object type or this function will panic. +// +// Flatmap values can only represent maps when they are of primitive types, +// so the given value must not have any maps of complex types or the result +// is undefined. 
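Editor's note: a hedged sketch, not part of the vendored patch, of the flatmap encoding produced by the function defined just below. Lists gain a "#" count entry and maps a "%" count entry, while object attributes are flattened without a count.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config/hcl2shim"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	v := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("web"),
		"ports": cty.ListVal([]cty.Value{cty.NumberIntVal(80), cty.NumberIntVal(443)}),
		"tags":  cty.MapVal(map[string]cty.Value{"env": cty.StringVal("prod")}),
	})

	m := hcl2shim.FlatmapValueFromHCL2(v)
	fmt.Println(m)
	// Expected shape:
	//   name => "web"
	//   ports.# => "2", ports.0 => "80", ports.1 => "443"
	//   tags.% => "1", tags.env => "prod"
}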
+func FlatmapValueFromHCL2(v cty.Value) map[string]string {
+ if v.IsNull() {
+ return nil
+ }
+
+ if !v.Type().IsObjectType() {
+ panic(fmt.Sprintf("FlatmapValueFromHCL2 called on %#v", v.Type()))
+ }
+
+ m := make(map[string]string)
+ flatmapValueFromHCL2Map(m, "", v)
+ return m
+}
+
+func flatmapValueFromHCL2Value(m map[string]string, key string, val cty.Value) {
+ ty := val.Type()
+ switch {
+ case ty.IsPrimitiveType() || ty == cty.DynamicPseudoType:
+ flatmapValueFromHCL2Primitive(m, key, val)
+ case ty.IsObjectType() || ty.IsMapType():
+ flatmapValueFromHCL2Map(m, key+".", val)
+ case ty.IsTupleType() || ty.IsListType() || ty.IsSetType():
+ flatmapValueFromHCL2Seq(m, key+".", val)
+ default:
+ panic(fmt.Sprintf("cannot encode %s to flatmap", ty.FriendlyName()))
+ }
+}
+
+func flatmapValueFromHCL2Primitive(m map[string]string, key string, val cty.Value) {
+ if !val.IsKnown() {
+ m[key] = UnknownVariableValue
+ return
+ }
+ if val.IsNull() {
+ // Omit entirely
+ return
+ }
+
+ var err error
+ val, err = convert.Convert(val, cty.String)
+ if err != nil {
+ // Should not be possible, since all primitive types can convert to string.
+ panic(fmt.Sprintf("invalid primitive encoding to flatmap: %s", err))
+ }
+ m[key] = val.AsString()
+}
+
+func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) {
+ if val.IsNull() {
+ // Omit entirely
+ return
+ }
+ if !val.IsKnown() {
+ switch {
+ case val.Type().IsObjectType():
+ // Whole objects can't be unknown in flatmap, so instead we'll
+ // just write all of the attribute values out as unknown.
+ for name, aty := range val.Type().AttributeTypes() {
+ flatmapValueFromHCL2Value(m, prefix+name, cty.UnknownVal(aty))
+ }
+ default:
+ m[prefix+"%"] = UnknownVariableValue
+ }
+ return
+ }
+
+ count := 0
+ for it := val.ElementIterator(); it.Next(); {
+ ak, av := it.Element()
+ name := ak.AsString()
+ flatmapValueFromHCL2Value(m, prefix+name, av)
+ count++
+ }
+ if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed
+ m[prefix+"%"] = strconv.Itoa(count)
+ }
+}
+
+func flatmapValueFromHCL2Seq(m map[string]string, prefix string, val cty.Value) {
+ if val.IsNull() {
+ // Omit entirely
+ return
+ }
+ if !val.IsKnown() {
+ m[prefix+"#"] = UnknownVariableValue
+ return
+ }
+
+ // For sets this won't actually generate exactly what helper/schema would've
+ // generated, because we don't have access to the set key function it
+ // would've used. However, in practice it doesn't actually matter what the
+ // keys are as long as they are unique, so we'll just generate sequential
+ // indexes for them as if it were a list.
+ //
+ // An important implication of this, however, is that the set ordering will
+ // not be consistent across mutations and so different keys may be assigned
+ // to the same value when round-tripping. Since this shim is intended to
+ // be short-lived and not used for round-tripping, we accept this.
+ i := 0
+ for it := val.ElementIterator(); it.Next(); {
+ _, av := it.Element()
+ key := prefix + strconv.Itoa(i)
+ flatmapValueFromHCL2Value(m, key, av)
+ i++
+ }
+ m[prefix+"#"] = strconv.Itoa(i)
+}
+
+// HCL2ValueFromFlatmap converts a map compatible with what would be produced
+// by the "flatmap" package to an HCL2 (really, the cty dynamic types library
+// that HCL2 uses) object type.
+//
+// The intended result type must be provided in order to guide how the
+// map contents are decoded. This must be an object type or this function
+// will panic.
+// +// Flatmap values can only represent maps when they are of primitive types, +// so the given type must not have any maps of complex types or the result +// is undefined. +// +// The result may contain null values if the given map does not contain keys +// for all of the different key paths implied by the given type. +func HCL2ValueFromFlatmap(m map[string]string, ty cty.Type) (cty.Value, error) { + if m == nil { + return cty.NullVal(ty), nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", ty)) + } + + return hcl2ValueFromFlatmapObject(m, "", ty.AttributeTypes()) +} + +func hcl2ValueFromFlatmapValue(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + var val cty.Value + var err error + switch { + case ty.IsPrimitiveType(): + val, err = hcl2ValueFromFlatmapPrimitive(m, key, ty) + case ty.IsObjectType(): + val, err = hcl2ValueFromFlatmapObject(m, key+".", ty.AttributeTypes()) + case ty.IsTupleType(): + val, err = hcl2ValueFromFlatmapTuple(m, key+".", ty.TupleElementTypes()) + case ty.IsMapType(): + val, err = hcl2ValueFromFlatmapMap(m, key+".", ty) + case ty.IsListType(): + val, err = hcl2ValueFromFlatmapList(m, key+".", ty) + case ty.IsSetType(): + val, err = hcl2ValueFromFlatmapSet(m, key+".", ty) + default: + err = fmt.Errorf("cannot decode %s from flatmap", ty.FriendlyName()) + } + + if err != nil { + return cty.DynamicVal, err + } + return val, nil +} + +func hcl2ValueFromFlatmapPrimitive(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + rawVal, exists := m[key] + if !exists { + return cty.NullVal(ty), nil + } + if rawVal == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + var err error + val := cty.StringVal(rawVal) + val, err = convert.Convert(val, ty) + if err != nil { + // This should never happen for _valid_ input, but flatmap data might + // be tampered with by the user and become invalid. 
+ return cty.DynamicVal, fmt.Errorf("invalid value for %q in state: %s", key, err) + } + + return val, nil +} + +func hcl2ValueFromFlatmapObject(m map[string]string, prefix string, atys map[string]cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + for name, aty := range atys { + val, err := hcl2ValueFromFlatmapValue(m, prefix+name, aty) + if err != nil { + return cty.DynamicVal, err + } + vals[name] = val + } + return cty.ObjectVal(vals), nil +} + +func hcl2ValueFromFlatmapTuple(m map[string]string, prefix string, etys []cty.Type) (cty.Value, error) { + var vals []cty.Value + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + countStr, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(cty.Tuple(etys)), nil + } + if countStr == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + count, err := strconv.Atoi(countStr) + if err != nil { + return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) + } + if count != len(etys) { + return cty.DynamicVal, fmt.Errorf("wrong number of values for %q in state: got %d, but need %d", prefix, count, len(etys)) + } + + vals = make([]cty.Value, len(etys)) + for i, ety := range etys { + key := prefix + strconv.Itoa(i) + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[i] = val + } + return cty.TupleVal(vals), nil +} + +func hcl2ValueFromFlatmapMap(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + ety := ty.ElementType() + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + // We actually don't really care about the "count" of a map for our + // purposes here, but we do need to check if it _exists_ in order to + // recognize the difference between null (not set at all) and empty. + if strCount, exists := m[prefix+"%"]; !exists { + return cty.NullVal(ty), nil + } else if strCount == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + for fullKey := range m { + if !strings.HasPrefix(fullKey, prefix) { + continue + } + + // The flatmap format doesn't allow us to distinguish between keys + // that contain periods and nested objects, so by convention a + // map is only ever of primitive type in flatmap, and we just assume + // that the remainder of the raw key (dots and all) is the key we + // want in the result value. 
+ key := fullKey[len(prefix):]
+ if key == "%" {
+ // Ignore the "count" key
+ continue
+ }
+
+ val, err := hcl2ValueFromFlatmapValue(m, fullKey, ety)
+ if err != nil {
+ return cty.DynamicVal, err
+ }
+ vals[key] = val
+ }
+
+ if len(vals) == 0 {
+ return cty.MapValEmpty(ety), nil
+ }
+ return cty.MapVal(vals), nil
+}
+
+func hcl2ValueFromFlatmapList(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
+ var vals []cty.Value
+
+ // if the container is unknown, there is no count string
+ listName := strings.TrimRight(prefix, ".")
+ if m[listName] == UnknownVariableValue {
+ return cty.UnknownVal(ty), nil
+ }
+
+ countStr, exists := m[prefix+"#"]
+ if !exists {
+ return cty.NullVal(ty), nil
+ }
+ if countStr == UnknownVariableValue {
+ return cty.UnknownVal(ty), nil
+ }
+
+ count, err := strconv.Atoi(countStr)
+ if err != nil {
+ return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err)
+ }
+
+ ety := ty.ElementType()
+ if count == 0 {
+ return cty.ListValEmpty(ety), nil
+ }
+
+ vals = make([]cty.Value, count)
+ for i := 0; i < count; i++ {
+ key := prefix + strconv.Itoa(i)
+ val, err := hcl2ValueFromFlatmapValue(m, key, ety)
+ if err != nil {
+ return cty.DynamicVal, err
+ }
+ vals[i] = val
+ }
+
+ return cty.ListVal(vals), nil
+}
+
+func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
+ var vals []cty.Value
+ ety := ty.ElementType()
+
+ // if the container is unknown, there is no count string
+ listName := strings.TrimRight(prefix, ".")
+ if m[listName] == UnknownVariableValue {
+ return cty.UnknownVal(ty), nil
+ }
+
+ strCount, exists := m[prefix+"#"]
+ if !exists {
+ return cty.NullVal(ty), nil
+ } else if strCount == UnknownVariableValue {
+ return cty.UnknownVal(ty), nil
+ }
+
+ // Keep track of keys we've seen, so we don't add the same set value
+ // multiple times. The cty.Set will normally de-duplicate values, but we may
+ // have unknown values that would not show as equivalent.
+ seen := map[string]bool{}
+
+ for fullKey := range m {
+ if !strings.HasPrefix(fullKey, prefix) {
+ continue
+ }
+ subKey := fullKey[len(prefix):]
+ if subKey == "#" {
+ // Ignore the "count" key
+ continue
+ }
+ key := fullKey
+ if dot := strings.IndexByte(subKey, '.'); dot != -1 {
+ key = fullKey[:dot+len(prefix)]
+ }
+
+ if seen[key] {
+ continue
+ }
+
+ seen[key] = true
+
+ // The flatmap format doesn't allow us to distinguish between keys
+ // that contain periods and nested objects, so by convention a
+ // map is only ever of primitive type in flatmap, and we just assume
+ // that the remainder of the raw key (dots and all) is the key we
+ // want in the result value.
+
+ val, err := hcl2ValueFromFlatmapValue(m, key, ety)
+ if err != nil {
+ return cty.DynamicVal, err
+ }
+ vals = append(vals, val)
+ }
+
+ if len(vals) == 0 && strCount == "1" {
+ // An empty set wouldn't be represented in the flatmap, so this must be
+ // a single empty object since the count is actually 1.
+ // Add an appropriately typed null value to the set.
+ var val cty.Value
+ switch {
+ case ety.IsMapType():
+ val = cty.MapValEmpty(ety)
+ case ety.IsListType():
+ val = cty.ListValEmpty(ety)
+ case ety.IsSetType():
+ val = cty.SetValEmpty(ety)
+ case ety.IsObjectType():
+ // TODO: cty.ObjectValEmpty
+ objectMap := map[string]cty.Value{}
+ for attr, ty := range ety.AttributeTypes() {
+ objectMap[attr] = cty.NullVal(ty)
+ }
+ val = cty.ObjectVal(objectMap)
+ default:
+ val = cty.NullVal(ety)
+ }
+ vals = append(vals, val)
+
+ } else if len(vals) == 0 {
+ return cty.SetValEmpty(ety), nil
+ }
+
+ return cty.SetVal(vals), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go
new file mode 100644
index 0000000000..3403c026bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go
@@ -0,0 +1,276 @@
+package hcl2shim
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/zclconf/go-cty/cty"
+)
+
+// RequiresReplace takes a list of flatmapped paths from a
+// InstanceDiff.Attributes along with the corresponding cty.Type, and returns
+// the list of the cty.Paths that are flagged as causing the resource
+// replacement (RequiresNew).
+// This will filter out redundant paths, paths that refer to flatmapped indexes
+// (e.g. "#", "%"), and will return any changes within a set as the path to the
+// set itself.
+func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) {
+ var paths []cty.Path
+
+ for _, attr := range attrs {
+ p, err := requiresReplacePath(attr, ty)
+ if err != nil {
+ return nil, err
+ }
+
+ paths = append(paths, p)
+ }
+
+ // now trim off any trailing steps that aren't GetAttrSteps, since only an
+ // attribute itself can require replacement
+ paths = trimPaths(paths)
+
+ // There may be redundant paths due to set elements or index attributes
+ // Do some ugly n^2 filtering, but these are always fairly small sets.
+ for i := 0; i < len(paths)-1; i++ {
+ for j := i + 1; j < len(paths); j++ {
+ if reflect.DeepEqual(paths[i], paths[j]) {
+ // swap the tail and slice it off
+ paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j]
+ paths = paths[:len(paths)-1]
+ j--
+ }
+ }
+ }
+
+ return paths, nil
+}
+
+// trimPaths removes any trailing steps that aren't of type GetAttrStep, since
+// only an attribute itself can require replacement
+func trimPaths(paths []cty.Path) []cty.Path {
+ var trimmed []cty.Path
+ for _, path := range paths {
+ path = trimPath(path)
+ if len(path) > 0 {
+ trimmed = append(trimmed, path)
+ }
+ }
+ return trimmed
+}
+
+func trimPath(path cty.Path) cty.Path {
+ for len(path) > 0 {
+ _, isGetAttr := path[len(path)-1].(cty.GetAttrStep)
+ if isGetAttr {
+ break
+ }
+ path = path[:len(path)-1]
+ }
+ return path
+}
+
+// requiresReplacePath takes a key from a flatmap along with the cty.Type
+// describing the structure, and returns the cty.Path that would be used to
+// reference the nested value in the data structure.
+// This is used specifically to record the RequiresReplace attributes from a
+// ResourceInstanceDiff.
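Editor's note: an illustrative sketch, not part of the vendored patch, showing RequiresReplace (defined above) converting flatmap keys into cty.Paths, with index keys trimmed back to their containing attribute.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config/hcl2shim"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"ami":  cty.String,
		"tags": cty.Map(cty.String),
	})

	// "tags.%" is a flatmap count key; it trims down to the "tags" attribute
	// itself, since only an attribute can force replacement.
	paths, err := hcl2shim.RequiresReplace([]string{"ami", "tags.%"}, ty)
	fmt.Println(len(paths), err) // 2 <nil>: one path for "ami", one for "tags"
}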
+func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) {
+ if k == "" {
+ return nil, nil
+ }
+ if !ty.IsObjectType() {
+ panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty))
+ }
+
+ path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes())
+ if err != nil {
+ return path, fmt.Errorf("[%s] %s", k, err)
+ }
+ return path, nil
+}
+
+func pathSplit(p string) (string, string) {
+ parts := strings.SplitN(p, ".", 2)
+ head := parts[0]
+ rest := ""
+ if len(parts) > 1 {
+ rest = parts[1]
+ }
+ return head, rest
+}
+
+func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) {
+ k, rest := pathSplit(key)
+
+ path := cty.Path{cty.GetAttrStep{Name: k}}
+
+ ty, ok := atys[k]
+ if !ok {
+ return path, fmt.Errorf("attribute %q not found", k)
+ }
+
+ if rest == "" {
+ return path, nil
+ }
+
+ p, err := pathFromFlatmapKeyValue(rest, ty)
+ if err != nil {
+ return path, err
+ }
+
+ return append(path, p...), nil
+}
+
+func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) {
+ var path cty.Path
+ var err error
+
+ switch {
+ case ty.IsPrimitiveType():
+ err = fmt.Errorf("invalid step %q with type %#v", key, ty)
+ case ty.IsObjectType():
+ path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes())
+ case ty.IsTupleType():
+ path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes())
+ case ty.IsMapType():
+ path, err = pathFromFlatmapKeyMap(key, ty)
+ case ty.IsListType():
+ path, err = pathFromFlatmapKeyList(key, ty)
+ case ty.IsSetType():
+ path, err = pathFromFlatmapKeySet(key, ty)
+ default:
+ err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName())
+ }
+
+ if err != nil {
+ return path, err
+ }
+
+ return path, nil
+}
+
+func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) {
+ var path cty.Path
+ var err error
+
+ k, rest := pathSplit(key)
+
+ // we don't need to convert the index keys to paths
+ if k == "#" {
+ return path, nil
+ }
+
+ idx, err := strconv.Atoi(k)
+ if err != nil {
+ return path, err
+ }
+
+ path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}}
+
+ if idx >= len(etys) {
+ return path, fmt.Errorf("index %s out of range in %#v", key, etys)
+ }
+
+ if rest == "" {
+ return path, nil
+ }
+
+ ty := etys[idx]
+
+ p, err := pathFromFlatmapKeyValue(rest, ty)
+ if err != nil {
+ return path, err
+ }
+
+ return append(path, p...), nil
+}
+
+func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) {
+ var path cty.Path
+ var err error
+
+ k, rest := key, ""
+ if !ty.ElementType().IsPrimitiveType() {
+ k, rest = pathSplit(key)
+ }
+
+ // we don't need to convert the index keys to paths
+ if k == "%" {
+ return path, nil
+ }
+
+ path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}}
+
+ if rest == "" {
+ return path, nil
+ }
+
+ p, err := pathFromFlatmapKeyValue(rest, ty.ElementType())
+ if err != nil {
+ return path, err
+ }
+
+ return append(path, p...), nil
+}
+
+func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) {
+ var path cty.Path
+ var err error
+
+ k, rest := pathSplit(key)
+
+ // we don't need to convert the index keys to paths
+ if key == "#" {
+ return path, nil
+ }
+
+ idx, err := strconv.Atoi(k)
+ if err != nil {
+ return path, err
+ }
+
+ path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}}
+
+ if rest == "" {
+ return path, nil
+ }
+
+ p, err := pathFromFlatmapKeyValue(rest, ty.ElementType())
+ if err != nil {
+ return path, err
+ }
+
+ return append(path, p...), nil
+}
+
+func
pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { + // once we hit a set, we can't return consistent paths, so just mark the + // set as a whole changed. + return nil, nil +} + +// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for +// use in generating legacy style diffs. +func FlatmapKeyFromPath(path cty.Path) string { + var parts []string + + for _, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + parts = append(parts, step.Name) + case cty.IndexStep: + switch ty := step.Key.Type(); { + case ty == cty.String: + parts = append(parts, step.Key.AsString()) + case ty == cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + parts = append(parts, strconv.Itoa(int(i))) + } + } + } + + return strings.Join(parts, ".") +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go index 0b697a5f5c..daeb0b8e0d 100644 --- a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go @@ -6,6 +6,8 @@ import ( "github.com/hashicorp/hil/ast" "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/configschema" ) // UnknownVariableValue is a sentinel value that can be used @@ -14,6 +16,108 @@ import ( // unknown keys. const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" +// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for +// known object values and uses the provided block schema to perform some +// additional normalization to better mimic the shape of value that the old +// HCL1/HIL-based codepaths would've produced. +// +// In particular, it discards the collections that we use to represent nested +// blocks (other than NestingSingle) if they are empty, which better mimics +// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't +// know that an unspecified block _could_ exist. +// +// The given object value must conform to the schema's implied type or this +// function will panic or produce incorrect results. +// +// This is primarily useful for the final transition from new-style values to +// terraform.ResourceConfig before calling to a legacy provider, since +// helper/schema (the old provider SDK) is particularly sensitive to these +// subtle differences within its validation code. 
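Editor's note: a minimal sketch, not part of the vendored patch, of the empty-collection normalization described above: an empty nested-block list is dropped from the result to mimic HCL1's behavior. The schema below is invented for illustration.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config/hcl2shim"
	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"name": {Type: cty.String, Optional: true},
		},
		BlockTypes: map[string]*configschema.NestedBlock{
			"rule": {
				Nesting: configschema.NestingList,
				Block: configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"port": {Type: cty.Number, Optional: true},
					},
				},
			},
		},
	}

	v := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("web"),
		"rule": cty.ListValEmpty(cty.Object(map[string]cty.Type{"port": cty.Number})),
	})

	fmt.Println(hcl2shim.ConfigValueFromHCL2Block(v, schema))
	// map[name:web]: the empty "rule" list is omitted entirely
}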
+func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { + if v.IsNull() { + return nil + } + if !v.IsKnown() { + panic("ConfigValueFromHCL2Block used with unknown value") + } + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) + } + + atys := v.Type().AttributeTypes() + ret := make(map[string]interface{}) + + for name := range schema.Attributes { + if _, exists := atys[name]; !exists { + continue + } + + av := v.GetAttr(name) + if av.IsNull() { + // Skip nulls altogether, to better mimic how HCL1 would behave + continue + } + ret[name] = ConfigValueFromHCL2(av) + } + + for name, blockS := range schema.BlockTypes { + if _, exists := atys[name]; !exists { + continue + } + bv := v.GetAttr(name) + if !bv.IsKnown() { + ret[name] = UnknownVariableValue + continue + } + if bv.IsNull() { + continue + } + + switch blockS.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) + + case configschema.NestingList, configschema.NestingSet: + l := bv.LengthInt() + if l == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make([]interface{}, 0, l) + for it := bv.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsKnown() { + elems = append(elems, UnknownVariableValue) + continue + } + elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) + } + ret[name] = elems + + case configschema.NestingMap: + if bv.LengthInt() == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make(map[string]interface{}) + for it := bv.ElementIterator(); it.Next(); { + ek, ev := it.Element() + if !ev.IsKnown() { + elems[ek.AsString()] = UnknownVariableValue + continue + } + elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) + } + ret[name] = elems + } + } + + return ret +} + // ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic // types library that HCL2 uses) to a value type that matches what would've // been produced from the HCL-based interpolator for an equivalent structure. @@ -73,7 +177,10 @@ func ConfigValueFromHCL2(v cty.Value) interface{} { it := v.ElementIterator() for it.Next() { ek, ev := it.Element() - l[ek.AsString()] = ConfigValueFromHCL2(ev) + cv := ConfigValueFromHCL2(ev) + if cv != nil { + l[ek.AsString()] = cv + } } return l } diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go new file mode 100644 index 0000000000..92f0213d72 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go @@ -0,0 +1,214 @@ +package hcl2shim + +import ( + "github.com/zclconf/go-cty/cty" +) + +// ValuesSDKEquivalent returns true if both of the given values seem equivalent +// as far as the legacy SDK diffing code would be concerned. +// +// Since SDK diffing is a fuzzy, inexact operation, this function is also +// fuzzy and inexact. It will err on the side of returning false if it +// encounters an ambiguous situation. Ambiguity is most common in the presence +// of sets because in practice it is impossible to exactly correlate +// nonequal-but-equivalent set elements because they have no identity separate +// from their value. +// +// This must be used _only_ for comparing values for equivalence within the +// SDK planning code. 
It is only meaningful to compare the "prior state"
+// provided by Terraform Core with the "planned new state" produced by the
+// legacy SDK code via shims. In particular it is not valid to use this
+// function with the config value or the "proposed new state" value
+// because they contain only the subset of data that Terraform Core itself is
+// able to determine.
+func ValuesSDKEquivalent(a, b cty.Value) bool {
+ if a == cty.NilVal || b == cty.NilVal {
+ // We don't generally expect nils to appear, but we'll allow them
+ // for robustness since the data structures produced by legacy SDK code
+ // can sometimes be non-ideal.
+ return a == b // equivalent if they are _both_ nil
+ }
+ if a.RawEquals(b) {
+ // Easy case. We use RawEquals because we want two unknowns to be
+ // considered equal here, whereas "Equals" would return unknown.
+ return true
+ }
+ if !a.IsKnown() || !b.IsKnown() {
+ // Two unknown values are equivalent regardless of type. A known is
+ // never equivalent to an unknown.
+ return a.IsKnown() == b.IsKnown()
+ }
+ if aZero, bZero := valuesSDKEquivalentIsNullOrZero(a), valuesSDKEquivalentIsNullOrZero(b); aZero || bZero {
+ // Two null/zero values are equivalent regardless of type. A non-zero is
+ // never equivalent to a zero.
+ return aZero == bZero
+ }
+
+ // If we get down here then we are guaranteed that both a and b are known,
+ // non-null values.
+
+ aTy := a.Type()
+ bTy := b.Type()
+ switch {
+ case aTy.IsSetType() && bTy.IsSetType():
+ return valuesSDKEquivalentSets(a, b)
+ case aTy.IsListType() && bTy.IsListType():
+ return valuesSDKEquivalentSequences(a, b)
+ case aTy.IsTupleType() && bTy.IsTupleType():
+ return valuesSDKEquivalentSequences(a, b)
+ case aTy.IsMapType() && bTy.IsMapType():
+ return valuesSDKEquivalentMappings(a, b)
+ case aTy.IsObjectType() && bTy.IsObjectType():
+ return valuesSDKEquivalentMappings(a, b)
+ case aTy == cty.Number && bTy == cty.Number:
+ return valuesSDKEquivalentNumbers(a, b)
+ default:
+ // We've now covered all the interesting cases, so anything that falls
+ // down here cannot be equivalent.
+ return false
+ }
+}
+
+// valuesSDKEquivalentIsNullOrZero returns true if the given value is either
+// null or is the "zero value" (in the SDK/Go sense) for its type.
+func valuesSDKEquivalentIsNullOrZero(v cty.Value) bool {
+ if v == cty.NilVal {
+ return true
+ }
+
+ ty := v.Type()
+ switch {
+ case !v.IsKnown():
+ return false
+ case v.IsNull():
+ return true
+
+ // After this point, v is always known and non-null
+ case ty.IsListType() || ty.IsSetType() || ty.IsMapType() || ty.IsObjectType() || ty.IsTupleType():
+ return v.LengthInt() == 0
+ case ty == cty.String:
+ return v.RawEquals(cty.StringVal(""))
+ case ty == cty.Number:
+ return v.RawEquals(cty.Zero)
+ case ty == cty.Bool:
+ return v.RawEquals(cty.False)
+ default:
+ // The above is exhaustive, but for robustness we'll consider anything
+ // else to _not_ be zero unless it is null.
+ return false
+ }
+}
+
+// valuesSDKEquivalentSets returns true only if each of the elements in a can
+// be correlated with at least one equivalent element in b and vice-versa.
+// This is a fuzzy operation that prefers to signal non-equivalence if it cannot
+// be certain that all elements are accounted for.
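Editor's note: an illustrative sketch, not part of the vendored patch, of the null-vs-zero fuzziness described above.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config/hcl2shim"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Null and "zero" values are treated as interchangeable...
	fmt.Println(hcl2shim.ValuesSDKEquivalent(cty.StringVal(""), cty.NullVal(cty.String))) // true
	fmt.Println(hcl2shim.ValuesSDKEquivalent(cty.Zero, cty.NullVal(cty.Number)))          // true

	// ...but two known, non-zero values must genuinely match.
	fmt.Println(hcl2shim.ValuesSDKEquivalent(cty.StringVal("a"), cty.StringVal("b"))) // false
}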
+func valuesSDKEquivalentSets(a, b cty.Value) bool {
+ if aLen, bLen := a.LengthInt(), b.LengthInt(); aLen != bLen {
+ return false
+ }
+
+ // Our methodology here is a little tricky, to deal with the fact that
+ // it's impossible to directly correlate two non-equal set elements because
+ // they don't have identities separate from their values.
+ // The approach is to count the number of equivalent elements each element
+ // of a has in b and vice-versa, and then return true only if each element
+ // in both sets has at least one equivalent.
+ as := a.AsValueSlice()
+ bs := b.AsValueSlice()
+ aeqs := make([]bool, len(as))
+ beqs := make([]bool, len(bs))
+ for ai, av := range as {
+ for bi, bv := range bs {
+ if ValuesSDKEquivalent(av, bv) {
+ aeqs[ai] = true
+ beqs[bi] = true
+ }
+ }
+ }
+
+ for _, eq := range aeqs {
+ if !eq {
+ return false
+ }
+ }
+ for _, eq := range beqs {
+ if !eq {
+ return false
+ }
+ }
+ return true
+}
+
+// valuesSDKEquivalentSequences decides equivalence for two sequence values
+// (lists or tuples).
+func valuesSDKEquivalentSequences(a, b cty.Value) bool {
+ as := a.AsValueSlice()
+ bs := b.AsValueSlice()
+ if len(as) != len(bs) {
+ return false
+ }
+
+ for i := range as {
+ if !ValuesSDKEquivalent(as[i], bs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// valuesSDKEquivalentMappings decides equivalence for two mapping values
+// (maps or objects).
+func valuesSDKEquivalentMappings(a, b cty.Value) bool {
+ as := a.AsValueMap()
+ bs := b.AsValueMap()
+ if len(as) != len(bs) {
+ return false
+ }
+
+ for k, av := range as {
+ bv, ok := bs[k]
+ if !ok {
+ return false
+ }
+ if !ValuesSDKEquivalent(av, bv) {
+ return false
+ }
+ }
+ return true
+}
+
+// valuesSDKEquivalentNumbers decides equivalence for two number values based
+// on the fact that the SDK uses int and float64 representations while
+// cty (and thus Terraform Core) uses big.Float, and so we expect to lose
+// precision in the round-trip.
+//
+// This does _not_ attempt to allow for an epsilon difference that may be
+// caused by accumulated inaccuracy in a float calculation, under the
+// expectation that providers generally do not actually do computations on
+// floats and instead just pass string representations of them on verbatim
+// to remote APIs. A remote API _itself_ may introduce inaccuracy, but that's
+// a problem for the provider itself to deal with, based on its knowledge of
+// the remote system, e.g. using DiffSuppressFunc.
+func valuesSDKEquivalentNumbers(a, b cty.Value) bool {
+ if a.RawEquals(b) {
+ return true // easy
+ }
+
+ af := a.AsBigFloat()
+ bf := b.AsBigFloat()
+
+ if af.IsInt() != bf.IsInt() {
+ return false
+ }
+ if af.IsInt() && bf.IsInt() {
+ return false // a.RawEquals(b) test above is good enough for integers
+ }
+
+ // The SDK supports only int and float64, so if it's not an integer
+ // we know that only a float64-level of precision can possibly be
+ // significant.
+ af64, _ := af.Float64() + bf64, _ := bf.Float64() + return af64 == bf64 +} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go index b94fca8896..6a2050c91f 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go @@ -47,6 +47,20 @@ func stringSliceToVariableValue(values []string) []ast.Variable { return output } +// listVariableSliceToVariableValue converts a list of lists into the value +// required to be returned from interpolation functions which return TypeList. +func listVariableSliceToVariableValue(values [][]ast.Variable) []ast.Variable { + output := make([]ast.Variable, len(values)) + + for index, value := range values { + output[index] = ast.Variable{ + Type: ast.TypeList, + Value: value, + } + } + return output +} + func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) { output := make([]string, len(values)) for index, value := range values { @@ -106,6 +120,7 @@ func Funcs() map[string]ast.Function { "pow": interpolationFuncPow(), "uuid": interpolationFuncUUID(), "replace": interpolationFuncReplace(), + "reverse": interpolationFuncReverse(), "rsadecrypt": interpolationFuncRsaDecrypt(), "sha1": interpolationFuncSha1(), "sha256": interpolationFuncSha256(), @@ -941,6 +956,25 @@ func interpolationFuncReplace() ast.Function { } } +// interpolationFuncReverse implements the "reverse" function that does list reversal +func interpolationFuncReverse() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + inputList := args[0].([]ast.Variable) + + reversedList := make([]ast.Variable, len(inputList)) + for idx := range inputList { + reversedList[len(inputList)-1-idx] = inputList[idx] + } + + return reversedList, nil + }, + } +} + func interpolationFuncLength() ast.Function { return ast.Function{ ArgTypes: []ast.Type{ast.TypeAny}, diff --git a/vendor/github.com/hashicorp/terraform/config/module/storage.go b/vendor/github.com/hashicorp/terraform/config/module/storage.go index 4b828dcb08..7734cbc0bd 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/storage.go +++ b/vendor/github.com/hashicorp/terraform/config/module/storage.go @@ -312,7 +312,7 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e // we need to lookup available versions // Only on Get if it's not found, on unconditionally on Update if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) { - resp, err := s.registry.Versions(mod) + resp, err := s.registry.ModuleVersions(mod) if err != nil { return rec, err } @@ -332,7 +332,7 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e rec.Version = match.Version - rec.url, err = s.registry.Location(mod, rec.Version) + rec.url, err = s.registry.ModuleLocation(mod, rec.Version) if err != nil { return rec, err } diff --git a/vendor/github.com/hashicorp/terraform/configs/backend.go b/vendor/github.com/hashicorp/terraform/configs/backend.go new file mode 100644 index 0000000000..6df7ddd02e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/backend.go @@ -0,0 +1,55 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) 
+
+// Backend represents a "backend" block inside a "terraform" block in a module
+// or file.
+type Backend struct {
+	Type   string
+	Config hcl.Body
+
+	TypeRange hcl.Range
+	DeclRange hcl.Range
+}
+
+func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) {
+	return &Backend{
+		Type:      block.Labels[0],
+		TypeRange: block.LabelRanges[0],
+		Config:    block.Body,
+		DeclRange: block.DefRange,
+	}, nil
+}
+
+// Hash produces a hash value for the receiver that covers the type and the
+// portions of the config that conform to the given schema.
+//
+// If the config does not conform to the schema then the result is not
+// meaningful for comparison since it will be based on an incomplete result.
+//
+// As an exception, required attributes in the schema are treated as optional
+// for the purpose of hashing, so that an incomplete configuration can still
+// be hashed. Other errors, such as extraneous attributes, have no such special
+// case.
+func (b *Backend) Hash(schema *configschema.Block) int {
+	// Don't fail if required attributes are not set. Instead, we'll just
+	// hash them as nulls.
+	schema = schema.NoneRequired()
+	spec := schema.DecoderSpec()
+	val, _ := hcldec.Decode(b.Config, spec, nil)
+	if val == cty.NilVal {
+		val = cty.UnknownVal(schema.ImpliedType())
+	}
+
+	toHash := cty.TupleVal([]cty.Value{
+		cty.StringVal(b.Type),
+		val,
+	})
+
+	return toHash.Hash()
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/compat_shim.go b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go
new file mode 100644
index 0000000000..66037fcdce
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go
@@ -0,0 +1,116 @@
+package configs
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// -------------------------------------------------------------------------
+// Functions in this file are compatibility shims intended to ease conversion
+// from the old configuration loader. Any use of these functions that makes
+// a change should generate a deprecation warning explaining to the user how
+// to update their code for new patterns.
+//
+// Shims are particularly important for any patterns that have been widely
+// documented in books, tutorials, etc. Users will still be starting from
+// these examples and we want to help them adopt the latest patterns rather
+// than leave them stranded.
+// -------------------------------------------------------------------------
+
+// shimTraversalInString takes any arbitrary expression and checks if it is
+// a quoted string in the native syntax. If it _is_, then it is parsed as a
+// traversal and re-wrapped into a synthetic traversal expression and a
+// warning is generated. Otherwise, the given expression is just returned
+// verbatim.
+//
+// This function has no effect on expressions from the JSON syntax, since
+// traversals in strings are the required pattern in that syntax.
+//
+// If wantKeyword is set, the generated warning diagnostic will talk about
+// keywords rather than references. The behavior is otherwise unchanged, and
+// the caller remains responsible for checking that the result is indeed
+// a keyword, e.g. using hcl.ExprAsKeyword.
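+//
+// For example (an illustrative case, not from the original comment): the
+// legacy form depends_on = ["aws_instance.foo"] is shimmed here so that the
+// quoted string behaves as the direct reference aws_instance.foo does in the
+// current syntax.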
+func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) { + // ObjectConsKeyExpr is a special wrapper type used for keys on object + // constructors to deal with the fact that naked identifiers are normally + // handled as "bareword" strings rather than as variable references. Since + // we know we're interpreting as a traversal anyway (and thus it won't + // matter whether it's a string or an identifier) we can safely just unwrap + // here and then process whatever we find inside as normal. + if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok { + expr = ocke.Wrapped + } + + if !exprIsNativeQuotedString(expr) { + return expr, nil + } + + strVal, diags := expr.Value(nil) + if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() { + // Since we're not even able to attempt a shim here, we'll discard + // the diagnostics we saw so far and let the caller's own error + // handling take care of reporting the invalid expression. + return expr, nil + } + + // The position handling here isn't _quite_ right because it won't + // take into account any escape sequences in the literal string, but + // it should be close enough for any error reporting to make sense. + srcRange := expr.Range() + startPos := srcRange.Start // copy + startPos.Column++ // skip initial quote + startPos.Byte++ // skip initial quote + + traversal, tDiags := hclsyntax.ParseTraversalAbs( + []byte(strVal.AsString()), + srcRange.Filename, + startPos, + ) + diags = append(diags, tDiags...) + + // For initial release our deprecation warnings are disabled to allow + // a period where modules can be compatible with both old and new + // conventions. + // FIXME: Re-enable these deprecation warnings in a release prior to + // Terraform 0.13 and then remove the shims altogether for 0.13. + /* + if wantKeyword { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Quoted keywords are deprecated", + Detail: "In this context, keywords are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this keyword to silence this warning.", + Subject: &srcRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Quoted references are deprecated", + Detail: "In this context, references are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this reference to silence this warning.", + Subject: &srcRange, + }) + } + */ + + return &hclsyntax.ScopeTraversalExpr{ + Traversal: traversal, + SrcRange: srcRange, + }, diags +} + +// shimIsIgnoreChangesStar returns true if the given expression seems to be +// a string literal whose value is "*". This is used to support a legacy +// form of ignore_changes = all . +// +// This function does not itself emit any diagnostics, so it's the caller's +// responsibility to emit a warning diagnostic when this function returns true. 
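+//
+// For example (illustrative): this detects the legacy form
+// ignore_changes = ["*"], which callers should then treat as equivalent to
+// the current ignore_changes = all form.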
+func shimIsIgnoreChangesStar(expr hcl.Expression) bool { + val, valDiags := expr.Value(nil) + if valDiags.HasErrors() { + return false + } + if val.Type() != cty.String || val.IsNull() || !val.IsKnown() { + return false + } + return val.AsString() == "*" +} diff --git a/vendor/github.com/hashicorp/terraform/configs/config.go b/vendor/github.com/hashicorp/terraform/configs/config.go new file mode 100644 index 0000000000..82943122e5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/config.go @@ -0,0 +1,205 @@ +package configs + +import ( + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" +) + +// A Config is a node in the tree of modules within a configuration. +// +// The module tree is constructed by following ModuleCall instances recursively +// through the root module transitively into descendent modules. +// +// A module tree described in *this* package represents the static tree +// represented by configuration. During evaluation a static ModuleNode may +// expand into zero or more module instances depending on the use of count and +// for_each configuration attributes within each call. +type Config struct { + // RootModule points to the Config for the root module within the same + // module tree as this module. If this module _is_ the root module then + // this is self-referential. + Root *Config + + // ParentModule points to the Config for the module that directly calls + // this module. If this is the root module then this field is nil. + Parent *Config + + // Path is a sequence of module logical names that traverse from the root + // module to this config. Path is empty for the root module. + // + // This should only be used to display paths to the end-user in rare cases + // where we are talking about the static module tree, before module calls + // have been resolved. In most cases, an addrs.ModuleInstance describing + // a node in the dynamic module tree is better, since it will then include + // any keys resulting from evaluating "count" and "for_each" arguments. + Path addrs.Module + + // ChildModules points to the Config for each of the direct child modules + // called from this module. The keys in this map match the keys in + // Module.ModuleCalls. + Children map[string]*Config + + // Module points to the object describing the configuration for the + // various elements (variables, resources, etc) defined by this module. + Module *Module + + // CallRange is the source range for the header of the module block that + // requested this module. + // + // This field is meaningless for the root module, where its contents are undefined. + CallRange hcl.Range + + // SourceAddr is the source address that the referenced module was requested + // from, as specified in configuration. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddr string + + // SourceAddrRange is the location in the configuration source where the + // SourceAddr value was set, for use in diagnostic messages. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddrRange hcl.Range + + // Version is the specific version that was selected for this module, + // based on version constraints given in configuration. + // + // This field is nil if the module was loaded from a non-registry source, + // since versions are not supported for other sources. 
+	//
+	// This field is meaningless for the root module, where it will always
+	// be nil.
+	Version *version.Version
+}
+
+// NewEmptyConfig constructs a single-node configuration tree with an empty
+// root module. This is generally a pretty useless thing to do, so most callers
+// should instead use BuildConfig.
+func NewEmptyConfig() *Config {
+	ret := &Config{}
+	ret.Root = ret
+	ret.Children = make(map[string]*Config)
+	ret.Module = &Module{}
+	return ret
+}
+
+// Depth returns the number of "hops" the receiver is from the root of its
+// module tree, with the root module having a depth of zero.
+func (c *Config) Depth() int {
+	ret := 0
+	this := c
+	for this.Parent != nil {
+		ret++
+		this = this.Parent
+	}
+	return ret
+}
+
+// DeepEach calls the given function once for each module in the tree, starting
+// with the receiver.
+//
+// A parent is always called before its children and children of a particular
+// node are visited in lexicographic order by their names.
+func (c *Config) DeepEach(cb func(c *Config)) {
+	cb(c)
+
+	names := make([]string, 0, len(c.Children))
+	for name := range c.Children {
+		names = append(names, name)
+	}
+	// Sort the names so that children are visited in lexicographic order,
+	// as the doc comment above promises.
+	sort.Strings(names)
+
+	for _, name := range names {
+		c.Children[name].DeepEach(cb)
+	}
+}
+
+// AllModules returns a slice of the receiver and all of its descendent
+// nodes in the module tree, in the same order they would be visited by
+// DeepEach.
+func (c *Config) AllModules() []*Config {
+	var ret []*Config
+	c.DeepEach(func(c *Config) {
+		ret = append(ret, c)
+	})
+	return ret
+}
+
+// Descendent returns the descendent config that has the given path beneath
+// the receiver, or nil if there is no such module.
+//
+// The path traverses the static module tree, prior to any expansion to handle
+// count and for_each arguments.
+//
+// An empty path will just return the receiver, and is therefore pointless.
+func (c *Config) Descendent(path addrs.Module) *Config {
+	current := c
+	for _, name := range path {
+		current = current.Children[name]
+		if current == nil {
+			return nil
+		}
+	}
+	return current
+}
+
+// DescendentForInstance is like Descendent except that it accepts a path
+// to a particular module instance in the dynamic module graph, returning
+// the node from the static module graph that corresponds to it.
+//
+// All instances created by a particular module call share the same
+// configuration, so the keys within the given path are disregarded.
+func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config {
+	current := c
+	for _, step := range path {
+		current = current.Children[step.Name]
+		if current == nil {
+			return nil
+		}
+	}
+	return current
+}
+
+// ProviderTypes returns the names of each distinct provider type referenced
+// in the receiving configuration.
+//
+// This is a helper for easily determining which provider types are required
+// to fully interpret the configuration, though it does not include version
+// information and so callers are expected to have already dealt with
+// provider version selection in an earlier step and have identified suitable
+// versions for each provider.
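+//
+// A minimal illustrative use (not from the original source; assumes the
+// standard library log package):
+//
+//	for _, name := range cfg.ProviderTypes() {
+//		log.Printf("[TRACE] configuration requires provider %q", name)
+//	}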
+func (c *Config) ProviderTypes() []string { + m := make(map[string]struct{}) + c.gatherProviderTypes(m) + + ret := make([]string, 0, len(m)) + for k := range m { + ret = append(ret, k) + } + sort.Strings(ret) + return ret +} +func (c *Config) gatherProviderTypes(m map[string]struct{}) { + if c == nil { + return + } + + for _, pc := range c.Module.ProviderConfigs { + m[pc.Name] = struct{}{} + } + for _, rc := range c.Module.ManagedResources { + providerAddr := rc.ProviderConfigAddr() + m[providerAddr.Type] = struct{}{} + } + for _, rc := range c.Module.DataResources { + providerAddr := rc.ProviderConfigAddr() + m[providerAddr.Type] = struct{}{} + } + + // Must also visit our child modules, recursively. + for _, cc := range c.Children { + cc.gatherProviderTypes(m) + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/config_build.go b/vendor/github.com/hashicorp/terraform/configs/config_build.go new file mode 100644 index 0000000000..948b2c8ffd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/config_build.go @@ -0,0 +1,179 @@ +package configs + +import ( + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" +) + +// BuildConfig constructs a Config from a root module by loading all of its +// descendent modules via the given ModuleWalker. +// +// The result is a module tree that has so far only had basic module- and +// file-level invariants validated. If the returned diagnostics contains errors, +// the returned module tree may be incomplete but can still be used carefully +// for static analysis. +func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + cfg := &Config{ + Module: root, + } + cfg.Root = cfg // Root module is self-referential. + cfg.Children, diags = buildChildModules(cfg, walker) + return cfg, diags +} + +func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := map[string]*Config{} + + calls := parent.Module.ModuleCalls + + // We'll sort the calls by their local names so that they'll appear in a + // predictable order in any logging that's produced during the walk. + callNames := make([]string, 0, len(calls)) + for k := range calls { + callNames = append(callNames, k) + } + sort.Strings(callNames) + + for _, callName := range callNames { + call := calls[callName] + path := make([]string, len(parent.Path)+1) + copy(path, parent.Path) + path[len(path)-1] = call.Name + + req := ModuleRequest{ + Name: call.Name, + Path: path, + SourceAddr: call.SourceAddr, + SourceAddrRange: call.SourceAddrRange, + VersionConstraint: call.Version, + Parent: parent, + CallRange: call.DeclRange, + } + + mod, ver, modDiags := walker.LoadModule(&req) + diags = append(diags, modDiags...) + if mod == nil { + // nil can be returned if the source address was invalid and so + // nothing could be loaded whatsoever. LoadModule should've + // returned at least one error diagnostic in that case. 
+ continue + } + + child := &Config{ + Parent: parent, + Root: parent.Root, + Path: path, + Module: mod, + CallRange: call.DeclRange, + SourceAddr: call.SourceAddr, + SourceAddrRange: call.SourceAddrRange, + Version: ver, + } + + child.Children, modDiags = buildChildModules(child, walker) + + ret[call.Name] = child + } + + return ret, diags +} + +// A ModuleWalker knows how to find and load a child module given details about +// the module to be loaded and a reference to its partially-loaded parent +// Config. +type ModuleWalker interface { + // LoadModule finds and loads a requested child module. + // + // If errors are detected during loading, implementations should return them + // in the diagnostics object. If the diagnostics object contains any errors + // then the caller will tolerate the returned module being nil or incomplete. + // If no errors are returned, it should be non-nil and complete. + // + // Full validation need not have been performed but an implementation should + // ensure that the basic file- and module-validations performed by the + // LoadConfigDir function (valid syntax, no namespace collisions, etc) have + // been performed before returning a module. + LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. +type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) + +// LoadModule implements ModuleWalker. +func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + return f(req) +} + +// ModuleRequest is used with the ModuleWalker interface to describe a child +// module that must be loaded. +type ModuleRequest struct { + // Name is the "logical name" of the module call within configuration. + // This is provided in case the name is used as part of a storage key + // for the module, but implementations must otherwise treat it as an + // opaque string. It is guaranteed to have already been validated as an + // HCL identifier and UTF-8 encoded. + Name string + + // Path is a list of logical names that traverse from the root module to + // this module. This can be used, for example, to form a lookup key for + // each distinct module call in a configuration, allowing for multiple + // calls with the same name at different points in the tree. + Path addrs.Module + + // SourceAddr is the source address string provided by the user in + // configuration. + SourceAddr string + + // SourceAddrRange is the source range for the SourceAddr value as it + // was provided in configuration. This can and should be used to generate + // diagnostics about the source address having invalid syntax, referring + // to a non-existent object, etc. + SourceAddrRange hcl.Range + + // VersionConstraint is the version constraint applied to the module in + // configuration. This data structure includes the source range for + // the constraint, which can and should be used to generate diagnostics + // about constraint-related issues, such as constraints that eliminate all + // available versions of a module whose source is otherwise valid. + VersionConstraint VersionConstraint + + // Parent is the partially-constructed module tree node that the loaded + // module will be added to. 
Callers may refer to any field of this
+	// structure except Children, which is still under construction when
+	// ModuleRequest objects are created and thus has undefined content.
+	// The main reason this is provided is so that full module paths can
+	// be constructed for uniqueness.
+	Parent *Config
+
+	// CallRange is the source range for the header of the "module" block
+	// in configuration that prompted this request. This can be used as the
+	// subject of an error diagnostic that relates to the module call itself,
+	// rather than to either its source address or its version number.
+	CallRange hcl.Range
+}
+
+// DisabledModuleWalker is a ModuleWalker that doesn't support
+// child modules at all, and so will return an error if asked to load one.
+//
+// This is provided primarily for testing. There is no good reason to use this
+// in the main application.
+var DisabledModuleWalker ModuleWalker
+
+func init() {
+	DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
+		return nil, nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Child modules are not supported",
+				Detail:   "Child module calls are not allowed in this context.",
+				Subject:  &req.CallRange,
+			},
+		}
+	})
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go
new file mode 100644
index 0000000000..ebbeb3b629
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go
@@ -0,0 +1,125 @@
+package configload
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// copyDir copies the src directory contents into dst. Both directories
+// should already exist.
+func copyDir(dst, src string) error {
+	src, err := filepath.EvalSymlinks(src)
+	if err != nil {
+		return err
+	}
+
+	walkFn := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if path == src {
+			return nil
+		}
+
+		if strings.HasPrefix(filepath.Base(path), ".") {
+			// Skip any dot files
+			if info.IsDir() {
+				return filepath.SkipDir
+			} else {
+				return nil
+			}
+		}
+
+		// The "path" has the src prefixed to it. We need to join our
+		// destination with the path without the src on it.
+		dstPath := filepath.Join(dst, path[len(src):])
+
+		// we don't want to try and copy the same file over itself.
+		if eq, err := sameFile(path, dstPath); eq {
+			return nil
+		} else if err != nil {
+			return err
+		}
+
+		// If we have a directory, make that subdirectory, then continue
+		// the walk.
+		if info.IsDir() {
+			if path == filepath.Join(src, dst) {
+				// dst is in src; don't walk it.
+				return nil
+			}
+
+			if err := os.MkdirAll(dstPath, 0755); err != nil {
+				return err
+			}
+
+			return nil
+		}
+
+		// If the current path is a symlink, recreate the symlink relative to
+		// the dst directory
+		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+			target, err := os.Readlink(path)
+			if err != nil {
+				return err
+			}
+
+			return os.Symlink(target, dstPath)
+		}
+
+		// If we have a file, copy the contents.
+		srcF, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		dstF, err := os.Create(dstPath)
+		if err != nil {
+			return err
+		}
+		defer dstF.Close()
+
+		if _, err := io.Copy(dstF, srcF); err != nil {
+			return err
+		}
+
+		// Chmod it
+		return os.Chmod(dstPath, info.Mode())
+	}
+
+	return filepath.Walk(src, walkFn)
+}
+
+// sameFile tries to determine if two paths refer to the same file.
+// If the paths don't match, we look up the inode on supported systems.
+func sameFile(a, b string) (bool, error) {
+	if a == b {
+		return true, nil
+	}
+
+	aIno, err := inode(a)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+
+	bIno, err := inode(b)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+
+	if aIno > 0 && aIno == bIno {
+		return true, nil
+	}
+
+	return false, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/doc.go b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go
new file mode 100644
index 0000000000..8b615f9026
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go
@@ -0,0 +1,4 @@
+// Package configload knows how to install modules into the .terraform/modules
+// directory and to load modules from those installed locations. It is used
+// in conjunction with the LoadConfig function in the parent package.
+package configload
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/getter.go b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go
new file mode 100644
index 0000000000..4a3daceee4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go
@@ -0,0 +1,150 @@
+package configload
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+
+	cleanhttp "github.com/hashicorp/go-cleanhttp"
+	getter "github.com/hashicorp/go-getter"
+)
+
+// We configure our own go-getter detector and getter sets here, because
+// the set of sources we support is part of Terraform's documentation and
+// so we don't want any new sources introduced in go-getter to sneak in here
+// and work even though they aren't documented. This also insulates us from
+// any meddling that might be done by other go-getter callers linked into our
+// executable.
+
+var goGetterDetectors = []getter.Detector{
+	new(getter.GitHubDetector),
+	new(getter.BitBucketDetector),
+	new(getter.S3Detector),
+	new(getter.FileDetector),
+}
+
+var goGetterNoDetectors = []getter.Detector{}
+
+var goGetterDecompressors = map[string]getter.Decompressor{
+	"bz2": new(getter.Bzip2Decompressor),
+	"gz":  new(getter.GzipDecompressor),
+	"xz":  new(getter.XzDecompressor),
+	"zip": new(getter.ZipDecompressor),
+
+	"tar.bz2":  new(getter.TarBzip2Decompressor),
+	"tar.tbz2": new(getter.TarBzip2Decompressor),
+
+	"tar.gz": new(getter.TarGzipDecompressor),
+	"tgz":    new(getter.TarGzipDecompressor),
+
+	"tar.xz": new(getter.TarXzDecompressor),
+	"txz":    new(getter.TarXzDecompressor),
+}
+
+var goGetterGetters = map[string]getter.Getter{
+	"file":  new(getter.FileGetter),
+	"git":   new(getter.GitGetter),
+	"hg":    new(getter.HgGetter),
+	"s3":    new(getter.S3Getter),
+	"http":  getterHTTPGetter,
+	"https": getterHTTPGetter,
+}
+
+var getterHTTPClient = cleanhttp.DefaultClient()
+
+var getterHTTPGetter = &getter.HttpGetter{
+	Client: getterHTTPClient,
+	Netrc:  true,
+}
+
+// A reusingGetter is a helper for the module installer that remembers
+// the final resolved addresses of all of the sources it has already been
+// asked to install, and will copy from a prior installation directory if
+// it has the same resolved source address.
+//
+// The keys in a reusingGetter are resolved and trimmed source addresses
+// (with a scheme always present, and without any "subdir" component),
+// and the values are the paths where each source was previously installed.
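+//
+// For example (illustrative): once "git::https://example.com/network.git"
+// has been installed for one module call, a later call that resolves to the
+// same address is satisfied by copying the already-installed directory
+// rather than downloading the package again.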
+type reusingGetter map[string]string

+// getWithGoGetter retrieves the package referenced in the given address
+// into the installation path and then returns the full path to any subdir
+// indicated in the address.
+//
+// The errors returned by this function are those surfaced by the underlying
+// go-getter library, which have very inconsistent quality as
+// end-user-actionable error messages. At this time we do not have any
+// reasonable way to improve these error messages at this layer because
+// the underlying errors are not separately recognizable.
+func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) {
+	packageAddr, subDir := splitAddrSubdir(addr)
+
+	log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath)
+
+	realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors)
+	if err != nil {
+		return "", err
+	}
+
+	var realSubDir string
+	realAddr, realSubDir = splitAddrSubdir(realAddr)
+	if realSubDir != "" {
+		subDir = filepath.Join(realSubDir, subDir)
+	}
+
+	if realAddr != packageAddr {
+		log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr)
+	}
+
+	if prevDir, exists := g[realAddr]; exists {
+		log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath)
+		err := os.Mkdir(instPath, os.ModePerm)
+		if err != nil {
+			return "", fmt.Errorf("failed to create directory %s: %s", instPath, err)
+		}
+		err = copyDir(instPath, prevDir)
+		if err != nil {
+			return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err)
+		}
+	} else {
+		log.Printf("[TRACE] fetching %q to %q", realAddr, instPath)
+		client := getter.Client{
+			Src: realAddr,
+			Dst: instPath,
+			Pwd: instPath,
+
+			Mode: getter.ClientModeDir,
+
+			Detectors:     goGetterNoDetectors, // we already did detection above
+			Decompressors: goGetterDecompressors,
+			Getters:       goGetterGetters,
+		}
+		err = client.Get()
+		if err != nil {
+			return "", err
+		}
+		// Remember where we installed this so we might reuse this directory
+		// on subsequent calls to avoid re-downloading.
+		g[realAddr] = instPath
+	}
+
+	// Our subDir string can contain wildcards until this point, so that
+	// e.g. a subDir of * can expand to one top-level directory in a .tar.gz
+	// archive. Now that we've expanded the archive successfully we must
+	// resolve that into a concrete path.
+	var finalDir string
+	if subDir != "" {
+		finalDir, err = getter.SubdirGlob(instPath, subDir)
+		log.Printf("[TRACE] expanded %q to %q", subDir, finalDir)
+		if err != nil {
+			return "", err
+		}
+	} else {
+		finalDir = instPath
+	}
+
+	// If we got this far then we have apparently succeeded in downloading
+	// the requested object!
+ return filepath.Clean(finalDir), nil +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go new file mode 100644 index 0000000000..57df04145a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go @@ -0,0 +1,21 @@ +// +build linux darwin openbsd netbsd solaris dragonfly + +package configload + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return st.Ino, nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go new file mode 100644 index 0000000000..4dc28eaa89 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go @@ -0,0 +1,21 @@ +// +build freebsd + +package configload + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return uint64(st.Ino), nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go new file mode 100644 index 0000000000..0d22e67264 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package configload + +// no syscall.Stat_t on windows, return 0 for inodes +func inode(path string) (uint64, error) { + return 0, nil +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go new file mode 100644 index 0000000000..416b48fc8e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go @@ -0,0 +1,150 @@ +package configload + +import ( + "fmt" + "path/filepath" + + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/spf13/afero" +) + +// A Loader instance is the main entry-point for loading configurations via +// this package. +// +// It extends the general config-loading functionality in the parent package +// "configs" to support installation of modules from remote sources and +// loading full configurations using modules that were previously installed. +type Loader struct { + // parser is used to read configuration + parser *configs.Parser + + // modules is used to install and locate descendent modules that are + // referenced (directly or indirectly) from the root module. + modules moduleMgr +} + +// Config is used with NewLoader to specify configuration arguments for the +// loader. +type Config struct { + // ModulesDir is a path to a directory where descendent modules are + // (or should be) installed. (This is usually the + // .terraform/modules directory, in the common case where this package + // is being loaded from the main Terraform CLI package.) + ModulesDir string + + // Services is the service discovery client to use when locating remote + // module registry endpoints. 
If this is nil then registry sources are + // not supported, which should be true only in specialized circumstances + // such as in tests. + Services *disco.Disco +} + +// NewLoader creates and returns a loader that reads configuration from the +// real OS filesystem. +// +// The loader has some internal state about the modules that are currently +// installed, which is read from disk as part of this function. If that +// manifest cannot be read then an error will be returned. +func NewLoader(config *Config) (*Loader, error) { + fs := afero.NewOsFs() + parser := configs.NewParser(fs) + reg := registry.NewClient(config.Services, nil) + + ret := &Loader{ + parser: parser, + modules: moduleMgr{ + FS: afero.Afero{Fs: fs}, + CanInstall: true, + Dir: config.ModulesDir, + Services: config.Services, + Registry: reg, + }, + } + + err := ret.modules.readModuleManifestSnapshot() + if err != nil { + return nil, fmt.Errorf("failed to read module manifest: %s", err) + } + + return ret, nil +} + +// ModulesDir returns the path to the directory where the loader will look for +// the local cache of remote module packages. +func (l *Loader) ModulesDir() string { + return l.modules.Dir +} + +// RefreshModules updates the in-memory cache of the module manifest from the +// module manifest file on disk. This is not necessary in normal use because +// module installation and configuration loading are separate steps, but it +// can be useful in tests where module installation is done as a part of +// configuration loading by a helper function. +// +// Call this function after any module installation where an existing loader +// is already alive and may be used again later. +// +// An error is returned if the manifest file cannot be read. +func (l *Loader) RefreshModules() error { + if l == nil { + // Nothing to do, then. + return nil + } + return l.modules.readModuleManifestSnapshot() +} + +// Parser returns the underlying parser for this loader. +// +// This is useful for loading other sorts of files than the module directories +// that a loader deals with, since then they will share the source code cache +// for this loader and can thus be shown as snippets in diagnostic messages. +func (l *Loader) Parser() *configs.Parser { + return l.parser +} + +// Sources returns the source code cache for the underlying parser of this +// loader. This is a shorthand for l.Parser().Sources(). +func (l *Loader) Sources() map[string][]byte { + return l.parser.Sources() +} + +// IsConfigDir returns true if and only if the given directory contains at +// least one Terraform configuration file. This is a wrapper around calling +// the same method name on the loader's parser. +func (l *Loader) IsConfigDir(path string) bool { + return l.parser.IsConfigDir(path) +} + +// ImportSources writes into the receiver's source code the given source +// code buffers. +// +// This is useful in the situation where an ancillary loader is created for +// some reason (e.g. loading config from a plan file) but the cached source +// code from that loader must be imported into the "main" loader in order +// to return source code snapshots in diagnostic messages. +// +// loader.ImportSources(otherLoader.Sources()) +func (l *Loader) ImportSources(sources map[string][]byte) { + p := l.Parser() + for name, src := range sources { + p.ForceFileSource(name, src) + } +} + +// ImportSourcesFromSnapshot writes into the receiver's source code the +// source files from the given snapshot. 
+// +// This is similar to ImportSources but knows how to unpack and flatten a +// snapshot data structure to get the corresponding flat source file map. +func (l *Loader) ImportSourcesFromSnapshot(snap *Snapshot) { + p := l.Parser() + for _, m := range snap.Modules { + baseDir := m.Dir + for fn, src := range m.Files { + fullPath := filepath.Join(baseDir, fn) + p.ForceFileSource(fullPath, src) + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go new file mode 100644 index 0000000000..93a94204fc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go @@ -0,0 +1,97 @@ +package configload + +import ( + "fmt" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs" +) + +// LoadConfig reads the Terraform module in the given directory and uses it as the +// root module to build the static module tree that represents a configuration, +// assuming that all required descendent modules have already been installed. +// +// If error diagnostics are returned, the returned configuration may be either +// nil or incomplete. In the latter case, cautious static analysis is possible +// in spite of the errors. +// +// LoadConfig performs the basic syntax and uniqueness validations that are +// required to process the individual modules, and also detects +func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) { + rootMod, diags := l.parser.LoadConfigDir(rootDir) + if rootMod == nil { + return nil, diags + } + + cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad)) + diags = append(diags, cDiags...) + + return cfg, diags +} + +// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that +// are presumed to have already been installed. A different function +// (moduleWalkerInstall) is used for installation. +func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { + // Since we're just loading here, we expect that all referenced modules + // will be already installed and described in our manifest. However, we + // do verify that the manifest and the configuration are in agreement + // so that we can prompt the user to run "terraform init" if not. + + key := l.modules.manifest.ModuleKey(req.Path) + record, exists := l.modules.manifest[key] + + if !exists { + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Module not installed", + Detail: "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.CallRange, + }, + } + } + + var diags hcl.Diagnostics + + // Check for inconsistencies between manifest and config + if req.SourceAddr != record.SourceAddr { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module source has changed", + Detail: "The source address was changed since this module was installed. 
Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.SourceAddrRange, + }) + } + if !req.VersionConstraint.Required.Check(record.Version) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module version requirements have changed", + Detail: fmt.Sprintf( + "The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", + record.Version, + ), + Subject: &req.SourceAddrRange, + }) + } + + mod, mDiags := l.parser.LoadConfigDir(record.Dir) + diags = append(diags, mDiags...) + if mod == nil { + // nil specifically indicates that the directory does not exist or + // cannot be read, so in this case we'll discard any generic diagnostics + // returned from LoadConfigDir and produce our own context-sensitive + // error message. + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Module not installed", + Detail: fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir), + Subject: &req.CallRange, + }, + } + } + + return mod, record.Version, diags +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go new file mode 100644 index 0000000000..44c6439733 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go @@ -0,0 +1,504 @@ +package configload + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "time" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/internal/modsdir" + "github.com/spf13/afero" +) + +// LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously +// creates an in-memory snapshot of the configuration files used, which can +// be later used to create a loader that may read only from this snapshot. +func (l *Loader) LoadConfigWithSnapshot(rootDir string) (*configs.Config, *Snapshot, hcl.Diagnostics) { + rootMod, diags := l.parser.LoadConfigDir(rootDir) + if rootMod == nil { + return nil, nil, diags + } + + snap := &Snapshot{ + Modules: map[string]*SnapshotModule{}, + } + walker := l.makeModuleWalkerSnapshot(snap) + cfg, cDiags := configs.BuildConfig(rootMod, walker) + diags = append(diags, cDiags...) + + addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil) + diags = append(diags, addDiags...) + + return cfg, snap, diags +} + +// NewLoaderFromSnapshot creates a Loader that reads files only from the +// given snapshot. +// +// A snapshot-based loader cannot install modules, so calling InstallModules +// on the return value will cause a panic. +// +// A snapshot-based loader also has access only to configuration files. Its +// underlying parser does not have access to other files in the native +// filesystem, such as values files. For those, either use a normal loader +// (created by NewLoader) or use the configs.Parser API directly. 
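+//
+// A minimal illustrative use (assuming snap was produced earlier by
+// LoadConfigWithSnapshot with root directory rootDir; these names are
+// placeholders, not part of the original source):
+//
+//	loader := NewLoaderFromSnapshot(snap)
+//	cfg, diags := loader.LoadConfig(rootDir)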
+func NewLoaderFromSnapshot(snap *Snapshot) *Loader { + fs := snapshotFS{snap} + parser := configs.NewParser(fs) + + ret := &Loader{ + parser: parser, + modules: moduleMgr{ + FS: afero.Afero{Fs: fs}, + CanInstall: false, + manifest: snap.moduleManifest(), + }, + } + + return ret +} + +// Snapshot is an in-memory representation of the source files from a +// configuration, which can be used as an alternative configurations source +// for a loader with NewLoaderFromSnapshot. +// +// The primary purpose of a Snapshot is to build the configuration portion +// of a plan file (see ../../plans/planfile) so that it can later be reloaded +// and used to recover the exact configuration that the plan was built from. +type Snapshot struct { + // Modules is a map from opaque module keys (suitable for use as directory + // names on all supported operating systems) to the snapshot information + // about each module. + Modules map[string]*SnapshotModule +} + +// NewEmptySnapshot constructs and returns a snapshot containing only an empty +// root module. This is not useful for anything except placeholders in tests. +func NewEmptySnapshot() *Snapshot { + return &Snapshot{ + Modules: map[string]*SnapshotModule{ + "": &SnapshotModule{ + Files: map[string][]byte{}, + }, + }, + } +} + +// SnapshotModule represents a single module within a Snapshot. +type SnapshotModule struct { + // Dir is the path, relative to the root directory given when the + // snapshot was created, where the module appears in the snapshot's + // virtual filesystem. + Dir string + + // Files is a map from each configuration file filename for the + // module to a raw byte representation of the source file contents. + Files map[string][]byte + + // SourceAddr is the source address given for this module in configuration. + SourceAddr string `json:"Source"` + + // Version is the version of the module that is installed, or nil if + // the module is installed from a source that does not support versions. + Version *version.Version `json:"-"` +} + +// moduleManifest constructs a module manifest based on the contents of +// the receiving snapshot. +func (s *Snapshot) moduleManifest() modsdir.Manifest { + ret := make(modsdir.Manifest) + + for k, modSnap := range s.Modules { + ret[k] = modsdir.Record{ + Key: k, + Dir: modSnap.Dir, + SourceAddr: modSnap.SourceAddr, + Version: modSnap.Version, + } + } + + return ret +} + +// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit +// the same lookup behaviors as l.moduleWalkerLoad but will additionally write +// source files from the referenced modules into the given snapshot. +func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker { + return configs.ModuleWalkerFunc( + func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { + mod, v, diags := l.moduleWalkerLoad(req) + if diags.HasErrors() { + return mod, v, diags + } + + key := l.modules.manifest.ModuleKey(req.Path) + record, exists := l.modules.manifest[key] + + if !exists { + // Should never happen, since otherwise moduleWalkerLoader would've + // returned an error and we would've returned already. + panic(fmt.Sprintf("module %s is not present in manifest", key)) + } + + addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version) + diags = append(diags, addDiags...) 
+ + return mod, v, diags + }, + ) +} + +func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics { + var diags hcl.Diagnostics + + primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir) + if moreDiags.HasErrors() { + // Any diagnostics we get here should be already present + // in diags, so it's weird if we get here but we'll allow it + // and return a general error message in that case. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to read directory for module", + Detail: fmt.Sprintf("The source directory %s could not be read", dir), + }) + return diags + } + + snapMod := &SnapshotModule{ + Dir: dir, + Files: map[string][]byte{}, + SourceAddr: sourceAddr, + Version: v, + } + + files := make([]string, 0, len(primaryFiles)+len(overrideFiles)) + files = append(files, primaryFiles...) + files = append(files, overrideFiles...) + sources := l.Sources() // should be populated with all the files we need by now + for _, filePath := range files { + filename := filepath.Base(filePath) + src, exists := sources[filePath] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing source file for snapshot", + Detail: fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath), + }) + continue + } + snapMod.Files[filepath.Clean(filename)] = src + } + + snap.Modules[key] = snapMod + + return diags +} + +// snapshotFS is an implementation of afero.Fs that reads from a snapshot. +// +// This is not intended as a general-purpose filesystem implementation. Instead, +// it just supports the minimal functionality required to support the +// configuration loader and parser as an implementation detail of creating +// a loader from a snapshot. +type snapshotFS struct { + snap *Snapshot +} + +var _ afero.Fs = snapshotFS{} + +func (fs snapshotFS) Create(name string) (afero.File, error) { + return nil, fmt.Errorf("cannot create file inside configuration snapshot") +} + +func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error { + return fmt.Errorf("cannot create directory inside configuration snapshot") +} + +func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error { + return fmt.Errorf("cannot create directories inside configuration snapshot") +} + +func (fs snapshotFS) Open(name string) (afero.File, error) { + + // Our "filesystem" is sparsely populated only with the directories + // mentioned by modules in our snapshot, so the high-level process + // for opening a file is: + // - Find the module snapshot corresponding to the containing directory + // - Find the file within that snapshot + // - Wrap the resulting byte slice in a snapshotFile to return + // + // The other possibility handled here is if the given name is for the + // module directory itself, in which case we'll return a snapshotDir + // instead. + // + // This function doesn't try to be incredibly robust in supporting + // different permutations of paths, etc because in practice we only + // need to support the path forms that our own loader and parser will + // generate. + + dir := filepath.Dir(name) + fn := filepath.Base(name) + directDir := filepath.Clean(name) + + // First we'll check to see if this is an exact path for a module directory. 
+ // We need to do this first (rather than as part of the next loop below) + // because a module in a child directory of another module can otherwise + // appear to be a file in that parent directory. + for _, candidate := range fs.snap.Modules { + modDir := filepath.Clean(candidate.Dir) + if modDir == directDir { + // We've matched the module directory itself + filenames := make([]string, 0, len(candidate.Files)) + for n := range candidate.Files { + filenames = append(filenames, n) + } + sort.Strings(filenames) + return snapshotDir{ + filenames: filenames, + }, nil + } + } + + // If we get here then the given path isn't a module directory exactly, so + // we'll treat it as a file path and try to find a module directory it + // could be located in. + var modSnap *SnapshotModule + for _, candidate := range fs.snap.Modules { + modDir := filepath.Clean(candidate.Dir) + if modDir == dir { + modSnap = candidate + break + } + } + if modSnap == nil { + return nil, os.ErrNotExist + } + + src, exists := modSnap.Files[fn] + if !exists { + return nil, os.ErrNotExist + } + + return &snapshotFile{ + src: src, + }, nil +} + +func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { + return fs.Open(name) +} + +func (fs snapshotFS) Remove(name string) error { + return fmt.Errorf("cannot remove file inside configuration snapshot") +} + +func (fs snapshotFS) RemoveAll(path string) error { + return fmt.Errorf("cannot remove files inside configuration snapshot") +} + +func (fs snapshotFS) Rename(old, new string) error { + return fmt.Errorf("cannot rename file inside configuration snapshot") +} + +func (fs snapshotFS) Stat(name string) (os.FileInfo, error) { + f, err := fs.Open(name) + if err != nil { + return nil, err + } + _, isDir := f.(snapshotDir) + return snapshotFileInfo{ + name: filepath.Base(name), + isDir: isDir, + }, nil +} + +func (fs snapshotFS) Name() string { + return "ConfigSnapshotFS" +} + +func (fs snapshotFS) Chmod(name string, mode os.FileMode) error { + return fmt.Errorf("cannot set file mode inside configuration snapshot") +} + +func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error { + return fmt.Errorf("cannot set file times inside configuration snapshot") +} + +type snapshotFile struct { + snapshotFileStub + src []byte + at int64 +} + +var _ afero.File = (*snapshotFile)(nil) + +func (f *snapshotFile) Read(p []byte) (n int, err error) { + if len(p) > 0 && f.at == int64(len(f.src)) { + return 0, io.EOF + } + if f.at > int64(len(f.src)) { + return 0, io.ErrUnexpectedEOF + } + if int64(len(f.src))-f.at >= int64(len(p)) { + n = len(p) + } else { + n = int(int64(len(f.src)) - f.at) + } + copy(p, f.src[f.at:f.at+int64(n)]) + f.at += int64(n) + return +} + +func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) { + f.at = off + return f.Read(p) +} + +func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + f.at = offset + case 1: + f.at += offset + case 2: + f.at = int64(len(f.src)) + offset + } + return f.at, nil +} + +type snapshotDir struct { + snapshotFileStub + filenames []string + at int +} + +var _ afero.File = snapshotDir{} + +func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) { + names, err := f.Readdirnames(count) + if err != nil { + return nil, err + } + ret := make([]os.FileInfo, len(names)) + for i, name := range names { + ret[i] = snapshotFileInfo{ + name: name, + isDir: false, + } + } + return ret, nil +} + +func (f snapshotDir) Readdirnames(count int) 
([]string, error) { + var outLen int + names := f.filenames[f.at:] + if count > 0 { + if len(names) < count { + outLen = len(names) + } else { + outLen = count + } + if len(names) == 0 { + return nil, io.EOF + } + } else { + outLen = len(names) + } + f.at += outLen + + return names[:outLen], nil +} + +// snapshotFileInfo is a minimal implementation of os.FileInfo to support our +// virtual filesystem from snapshots. +type snapshotFileInfo struct { + name string + isDir bool +} + +var _ os.FileInfo = snapshotFileInfo{} + +func (fi snapshotFileInfo) Name() string { + return fi.name +} + +func (fi snapshotFileInfo) Size() int64 { + // In practice, our parser and loader never call Size + return -1 +} + +func (fi snapshotFileInfo) Mode() os.FileMode { + return os.ModePerm +} + +func (fi snapshotFileInfo) ModTime() time.Time { + return time.Now() +} + +func (fi snapshotFileInfo) IsDir() bool { + return fi.isDir +} + +func (fi snapshotFileInfo) Sys() interface{} { + return nil +} + +type snapshotFileStub struct{} + +func (f snapshotFileStub) Close() error { + return nil +} + +func (f snapshotFileStub) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("cannot read") +} + +func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("cannot read") +} + +func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) { + return 0, fmt.Errorf("cannot seek") +} + +func (f snapshotFileStub) Write(p []byte) (n int, err error) { + return f.WriteAt(p, 0) +} + +func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("cannot write to file in snapshot") +} + +func (f snapshotFileStub) WriteString(s string) (n int, err error) { + return 0, fmt.Errorf("cannot write to file in snapshot") +} + +func (f snapshotFileStub) Name() string { + // in practice, the loader and parser never use this + return "" +} + +func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot use Readdir on a file") +} + +func (f snapshotFileStub) Readdirnames(count int) ([]string, error) { + return nil, fmt.Errorf("cannot use Readdir on a file") +} + +func (f snapshotFileStub) Stat() (os.FileInfo, error) { + return nil, fmt.Errorf("cannot stat") +} + +func (f snapshotFileStub) Sync() error { + return nil +} + +func (f snapshotFileStub) Truncate(size int64) error { + return fmt.Errorf("cannot write to file in snapshot") +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go new file mode 100644 index 0000000000..3c410eeb7d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go @@ -0,0 +1,76 @@ +package configload + +import ( + "os" + "path/filepath" + + "github.com/hashicorp/terraform/internal/modsdir" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/spf13/afero" +) + +type moduleMgr struct { + FS afero.Afero + + // CanInstall is true for a module manager that can support installation. + // + // This must be set only if FS is an afero.OsFs, because the installer + // (which uses go-getter) is not aware of the virtual filesystem + // abstraction and will always write into the "real" filesystem. + CanInstall bool + + // Dir is the path where descendent modules are (or will be) installed. 
+ Dir string + + // Services is a service discovery client that will be used to find + // remote module registry endpoints. This object may be pre-loaded with + // cached discovery information. + Services *disco.Disco + + // Registry is a client for the module registry protocol, which is used + // when a module is requested from a registry source. + Registry *registry.Client + + // manifest tracks the currently-installed modules for this manager. + // + // The loader may read this. Only the installer may write to it, and + // after a set of updates are completed the installer must call + // writeModuleManifestSnapshot to persist a snapshot of the manifest + // to disk for use on subsequent runs. + manifest modsdir.Manifest +} + +func (m *moduleMgr) manifestSnapshotPath() string { + return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename) +} + +// readModuleManifestSnapshot loads a manifest snapshot from the filesystem. +func (m *moduleMgr) readModuleManifestSnapshot() error { + r, err := m.FS.Open(m.manifestSnapshotPath()) + if err != nil { + if os.IsNotExist(err) { + // We'll treat a missing file as an empty manifest + m.manifest = make(modsdir.Manifest) + return nil + } + return err + } + + m.manifest, err = modsdir.ReadManifestSnapshot(r) + return err +} + +// writeModuleManifestSnapshot writes a snapshot of the current manifest +// to the filesystem. +// +// The caller must guarantee no concurrent modifications of the manifest for +// the duration of a call to this function, or the behavior is undefined. +func (m *moduleMgr) writeModuleManifestSnapshot() error { + w, err := m.FS.Create(m.manifestSnapshotPath()) + if err != nil { + return err + } + + return m.manifest.WriteSnapshot(w) +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go new file mode 100644 index 0000000000..594cf64062 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go @@ -0,0 +1,45 @@ +package configload + +import ( + "strings" + + "github.com/hashicorp/go-getter" + + "github.com/hashicorp/terraform/registry/regsrc" +) + +var localSourcePrefixes = []string{ + "./", + "../", + ".\\", + "..\\", +} + +func isLocalSourceAddr(addr string) bool { + for _, prefix := range localSourcePrefixes { + if strings.HasPrefix(addr, prefix) { + return true + } + } + return false +} + +func isRegistrySourceAddr(addr string) bool { + _, err := regsrc.ParseModuleSource(addr) + return err == nil +} + +// splitAddrSubdir splits the given address (which is assumed to be a +// registry address or go-getter-style address) into a package portion +// and a sub-directory portion. +// +// The package portion defines what should be downloaded and then the +// sub-directory portion, if present, specifies a sub-directory within +// the downloaded object (an archive, VCS repository, etc) that contains +// the module's configuration files. +// +// The subDir portion will be returned as empty if no subdir separator +// ("//") is present in the address. 
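+//
+// As an illustrative sketch (the address below is hypothetical, not taken
+// from this change), an address with a subdir separator splits like this:
+//
+//	splitAddrSubdir("git::https://example.com/network.git//modules/vpc")
+//	// packageAddr: "git::https://example.com/network.git"
+//	// subDir:      "modules/vpc"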
+func splitAddrSubdir(addr string) (packageAddr, subDir string) {
+	return getter.SourceDirSubdir(addr)
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/testing.go b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go
new file mode 100644
index 0000000000..86ca9d10b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go
@@ -0,0 +1,43 @@
+package configload
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+// NewLoaderForTests is a variant of NewLoader that is intended to be more
+// convenient for unit tests.
+//
+// The loader's modules directory is a separate temporary directory created
+// for each call. Along with the created loader, this function returns a
+// cleanup function that should be called before the test completes in order
+// to remove that temporary directory.
+//
+// In the case of any errors, t.Fatal (or similar) will be called to halt
+// execution of the test, so the calling test does not need to handle errors
+// itself.
+func NewLoaderForTests(t *testing.T) (*Loader, func()) {
+	t.Helper()
+
+	modulesDir, err := ioutil.TempDir("", "tf-configs")
+	if err != nil {
+		t.Fatalf("failed to create temporary modules dir: %s", err)
+		return nil, func() {}
+	}
+
+	cleanup := func() {
+		os.RemoveAll(modulesDir)
+	}
+
+	loader, err := NewLoader(&Config{
+		ModulesDir: modulesDir,
+	})
+	if err != nil {
+		cleanup()
+		t.Fatalf("failed to create config loader: %s", err)
+		return nil, func() {}
+	}
+
+	return loader, cleanup
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
new file mode 100644
index 0000000000..e59f58d8e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
@@ -0,0 +1,274 @@
+package configschema
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// CoerceValue attempts to force the given value to conform to the type
+// implied by the receiver, while also applying the same validation and
+// transformation rules that would be applied by the decoder specification
+// returned by method DecoderSpec.
+//
+// This is useful in situations where a configuration must be derived from
+// an already-decoded value. It is always better to decode directly from
+// configuration where possible since then source location information is
+// still available to produce diagnostics, but in special situations this
+// function allows a compatible result to be obtained even if the
+// configuration objects are not available.
+//
+// If the given value cannot be converted to conform to the receiving schema
+// then an error is returned describing one of possibly many problems. This
+// error may be a cty.PathError indicating a position within the nested
+// data structure where the problem applies.
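+//
+// A minimal usage sketch (the schema and value below are hypothetical, not
+// part of this change):
+//
+//	schema := &configschema.Block{
+//		Attributes: map[string]*configschema.Attribute{
+//			"name": {Type: cty.String, Required: true},
+//		},
+//	}
+//	val, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{
+//		"name": cty.StringVal("example"),
+//	}))
+//	// On success, val conforms to schema.ImpliedType(); on failure, err
+//	// may be a cty.PathError identifying the offending element.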
+func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) { + var path cty.Path + return b.coerceValue(in, path) +} + +func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + switch { + case in.IsNull(): + return cty.NullVal(b.ImpliedType()), nil + case !in.IsKnown(): + return cty.UnknownVal(b.ImpliedType()), nil + } + + ty := in.Type() + if !ty.IsObjectType() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required") + } + + for name := range ty.AttributeTypes() { + if _, defined := b.Attributes[name]; defined { + continue + } + if _, defined := b.BlockTypes[name]; defined { + continue + } + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name) + } + + attrs := make(map[string]cty.Value) + + for name, attrS := range b.Attributes { + var val cty.Value + switch { + case ty.HasAttribute(name): + val = in.GetAttr(name) + case attrS.Computed || attrS.Optional: + val = cty.NullVal(attrS.Type) + default: + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name) + } + + val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + + attrs[name] = val + } + for typeName, blockS := range b.BlockTypes { + switch blockS.Nesting { + + case NestingSingle, NestingGroup: + switch { + case ty.HasAttribute(typeName): + var err error + val := in.GetAttr(typeName) + attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + case blockS.MinItems != 1 && blockS.MaxItems != 1: + if blockS.Nesting == NestingGroup { + attrs[typeName] = blockS.EmptyValue() + } else { + attrs[typeName] = cty.NullVal(blockS.ImpliedType()) + } + default: + // We use the word "attribute" here because we're talking about + // the cty sense of that word rather than the HCL sense. 
+ return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName) + } + + case NestingList: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list") + } + l := coll.LengthInt() + if l < blockS.MinItems { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems) + } + if l > blockS.MaxItems && blockS.MaxItems > 0 { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems) + } + if l == 0 { + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.ListVal(elems) + case blockS.MinItems == 0: + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + default: + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName) + } + + case NestingSet: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Set(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set") + } + l := coll.LengthInt() + if l < blockS.MinItems { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems) + } + if l > blockS.MaxItems && blockS.MaxItems > 0 { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems) + } + if l == 0 { + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.SetVal(elems) + case blockS.MinItems == 0: + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + default: + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName) + } + + case NestingMap: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") 
+ } + l := coll.LengthInt() + if l == 0 { + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + continue + } + elems := make(map[string]cty.Value) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + key, val := it.Element() + if key.Type() != cty.String || key.IsNull() || !key.IsKnown() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + } + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems[key.AsString()] = val + } + } + + // If the attribute values here contain any DynamicPseudoTypes, + // the concrete type must be an object. + useObject := false + switch { + case coll.Type().IsObjectType(): + useObject = true + default: + // It's possible that we were given a map, and need to coerce it to an object + ety := coll.Type().ElementType() + for _, v := range elems { + if !v.Type().Equals(ety) { + useObject = true + break + } + } + } + + if useObject { + attrs[typeName] = cty.ObjectVal(elems) + } else { + attrs[typeName] = cty.MapVal(elems) + } + default: + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + } + + default: + // should never happen because above is exhaustive + panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting)) + } + } + + return cty.ObjectVal(attrs), nil +} + +func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + val, err := convert.Convert(in, a.Type) + if err != nil { + return cty.UnknownVal(a.Type), path.NewError(err) + } + return val, nil +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go new file mode 100644 index 0000000000..d8f41eabc7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go @@ -0,0 +1,117 @@ +package configschema + +import ( + "github.com/hashicorp/hcl2/hcldec" +) + +var mapLabelNames = []string{"key"} + +// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body +// using the facilities in the hcldec package. +// +// The returned specification is guaranteed to return a value of the same type +// returned by method ImpliedType, but it may contain null values if any of the +// block attributes are defined as optional and/or computed respectively. +func (b *Block) DecoderSpec() hcldec.Spec { + ret := hcldec.ObjectSpec{} + if b == nil { + return ret + } + + for name, attrS := range b.Attributes { + ret[name] = attrS.decoderSpec(name) + } + + for name, blockS := range b.BlockTypes { + if _, exists := ret[name]; exists { + // This indicates an invalid schema, since it's not valid to + // define both an attribute and a block type of the same name. + // However, we don't raise this here since it's checked by + // InternalValidate. 
+			continue
+		}
+
+		childSpec := blockS.Block.DecoderSpec()
+
+		switch blockS.Nesting {
+		case NestingSingle, NestingGroup:
+			ret[name] = &hcldec.BlockSpec{
+				TypeName: name,
+				Nested:   childSpec,
+				Required: blockS.MinItems == 1 && blockS.MaxItems >= 1,
+			}
+			if blockS.Nesting == NestingGroup {
+				ret[name] = &hcldec.DefaultSpec{
+					Primary: ret[name],
+					Default: &hcldec.LiteralSpec{
+						Value: blockS.EmptyValue(),
+					},
+				}
+			}
+		case NestingList:
+			// We prefer to use a list where possible, since it makes our
+			// implied type more complete, but if there are any
+			// dynamically-typed attributes inside we must use a tuple
+			// instead, at the expense of our type then not being predictable.
+			if blockS.Block.ImpliedType().HasDynamicTypes() {
+				ret[name] = &hcldec.BlockTupleSpec{
+					TypeName: name,
+					Nested:   childSpec,
+					MinItems: blockS.MinItems,
+					MaxItems: blockS.MaxItems,
+				}
+			} else {
+				ret[name] = &hcldec.BlockListSpec{
+					TypeName: name,
+					Nested:   childSpec,
+					MinItems: blockS.MinItems,
+					MaxItems: blockS.MaxItems,
+				}
+			}
+		case NestingSet:
+			// We forbid dynamically-typed attributes inside NestingSet in
+			// InternalValidate, so we don't do anything special to handle
+			// that here. (There is no set analog to tuple and object types,
+			// because cty's set implementation depends on knowing the static
+			// type in order to properly compute its internal hashes.)
+			ret[name] = &hcldec.BlockSetSpec{
+				TypeName: name,
+				Nested:   childSpec,
+				MinItems: blockS.MinItems,
+				MaxItems: blockS.MaxItems,
+			}
+		case NestingMap:
+			// We prefer to use a map where possible, since it makes our
+			// implied type more complete, but if there are any
+			// dynamically-typed attributes inside we must use an object
+			// instead, at the expense of our type then not being predictable.
+			if blockS.Block.ImpliedType().HasDynamicTypes() {
+				ret[name] = &hcldec.BlockObjectSpec{
+					TypeName:   name,
+					Nested:     childSpec,
+					LabelNames: mapLabelNames,
+				}
+			} else {
+				ret[name] = &hcldec.BlockMapSpec{
+					TypeName:   name,
+					Nested:     childSpec,
+					LabelNames: mapLabelNames,
+				}
+			}
+		default:
+			// Invalid nesting type is just ignored. It's checked by
+			// InternalValidate.
+			continue
+		}
+	}
+
+	return ret
+}
+
+func (a *Attribute) decoderSpec(name string) hcldec.Spec {
+	return &hcldec.AttrSpec{
+		Name:     name,
+		Type:     a.Type,
+		Required: a.Required,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/doc.go b/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
similarity index 100%
rename from vendor/github.com/hashicorp/terraform/config/configschema/doc.go
rename to vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
new file mode 100644
index 0000000000..005da56bf5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
@@ -0,0 +1,59 @@
+package configschema
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// EmptyValue returns the "empty value" for the receiving block, which for
+// a block type is a non-null object where all of the attribute values are
+// the empty values of the block's attributes and nested block types.
+//
+// In other words, it returns the value that would be returned if an empty
+// block were decoded against the receiving schema, assuming that no required
+// attribute or block constraints were honored.
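+//
+// For example (an illustrative schema, not part of this change), a block
+// with a single optional string attribute has this empty value:
+//
+//	b := &configschema.Block{
+//		Attributes: map[string]*configschema.Attribute{
+//			"name": {Type: cty.String, Optional: true},
+//		},
+//	}
+//	v := b.EmptyValue()
+//	// v is cty.ObjectVal(map[string]cty.Value{"name": cty.NullVal(cty.String)})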
+func (b *Block) EmptyValue() cty.Value { + vals := make(map[string]cty.Value) + for name, attrS := range b.Attributes { + vals[name] = attrS.EmptyValue() + } + for name, blockS := range b.BlockTypes { + vals[name] = blockS.EmptyValue() + } + return cty.ObjectVal(vals) +} + +// EmptyValue returns the "empty value" for the receiving attribute, which is +// the value that would be returned if there were no definition of the attribute +// at all, ignoring any required constraint. +func (a *Attribute) EmptyValue() cty.Value { + return cty.NullVal(a.Type) +} + +// EmptyValue returns the "empty value" for when there are zero nested blocks +// present of the receiving type. +func (b *NestedBlock) EmptyValue() cty.Value { + switch b.Nesting { + case NestingSingle: + return cty.NullVal(b.Block.ImpliedType()) + case NestingGroup: + return b.Block.EmptyValue() + case NestingList: + if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { + return cty.EmptyTupleVal + } else { + return cty.ListValEmpty(ty) + } + case NestingMap: + if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { + return cty.EmptyObjectVal + } else { + return cty.MapValEmpty(ty) + } + case NestingSet: + return cty.SetValEmpty(b.Block.ImpliedType()) + default: + // Should never get here because the above is intended to be exhaustive, + // but we'll be robust and return a result nonetheless. + return cty.NullVal(cty.DynamicPseudoType) + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go new file mode 100644 index 0000000000..c0ee8419d3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go @@ -0,0 +1,42 @@ +package configschema + +import ( + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty.Type that would result from decoding a +// configuration block using the receiving block schema. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. Code that creates configschema.Block objects should be +// tested using the InternalValidate method to detect any inconsistencies +// that would cause this method to fall back on defaults and assumptions. +func (b *Block) ImpliedType() cty.Type { + if b == nil { + return cty.EmptyObject + } + + return hcldec.ImpliedType(b.DecoderSpec()) +} + +// ContainsSensitive returns true if any of the attributes of the receiving +// block or any of its descendent blocks are marked as sensitive. +// +// Blocks themselves cannot be sensitive as a whole -- sensitivity is a +// per-attribute idea -- but sometimes we want to include a whole object +// decoded from a block in some UI output, and that is safe to do only if +// none of the contained attributes are sensitive. 
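+//
+// A minimal sketch (hypothetical schema): a block containing one sensitive
+// attribute reports true, so a UI should avoid echoing the whole object:
+//
+//	b := &configschema.Block{
+//		Attributes: map[string]*configschema.Attribute{
+//			"password": {Type: cty.String, Optional: true, Sensitive: true},
+//		},
+//	}
+//	b.ContainsSensitive() // returns true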
+func (b *Block) ContainsSensitive() bool { + for _, attrS := range b.Attributes { + if attrS.Sensitive { + return true + } + } + for _, blockS := range b.BlockTypes { + if blockS.ContainsSensitive() { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go similarity index 83% rename from vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go rename to vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go index 33cbe884f1..ebf1abbab1 100644 --- a/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go @@ -72,10 +72,23 @@ func (b *Block) internalValidate(prefix string, err error) error { case blockS.MinItems < 0 || blockS.MinItems > 1: err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) } + case NestingGroup: + if blockS.MinItems != 0 || blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name)) + } case NestingList, NestingSet: if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) } + if blockS.Nesting == NestingSet { + ety := blockS.Block.ImpliedType() + if ety.HasDynamicTypes() { + // This is not permitted because the HCL (cty) set implementation + // needs to know the exact type of set elements in order to + // properly hash them, and so can't support mixed types. + err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) + } + } case NestingMap: if blockS.MinItems != 0 || blockS.MaxItems != 0 { err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go new file mode 100644 index 0000000000..febe743e11 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=NestingMode"; DO NOT EDIT. + +package configschema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+	var x [1]struct{}
+	_ = x[nestingModeInvalid-0]
+	_ = x[NestingSingle-1]
+	_ = x[NestingGroup-2]
+	_ = x[NestingList-3]
+	_ = x[NestingSet-4]
+	_ = x[NestingMap-5]
+}
+
+const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap"
+
+var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74}
+
+func (i NestingMode) String() string {
+	if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
+		return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go
new file mode 100644
index 0000000000..0be3b8fa35
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go
@@ -0,0 +1,38 @@
+package configschema
+
+// NoneRequired returns a deep copy of the receiver with any required
+// attributes translated to optional.
+func (b *Block) NoneRequired() *Block {
+	ret := &Block{}
+
+	if b.Attributes != nil {
+		ret.Attributes = make(map[string]*Attribute, len(b.Attributes))
+	}
+	for name, attrS := range b.Attributes {
+		ret.Attributes[name] = attrS.forceOptional()
+	}
+
+	if b.BlockTypes != nil {
+		ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes))
+	}
+	for name, blockS := range b.BlockTypes {
+		ret.BlockTypes[name] = blockS.noneRequired()
+	}
+
+	return ret
+}
+
+func (b *NestedBlock) noneRequired() *NestedBlock {
+	ret := *b
+	ret.Block = *(ret.Block.NoneRequired())
+	ret.MinItems = 0
+	ret.MaxItems = 0
+	return &ret
+}
+
+func (a *Attribute) forceOptional() *Attribute {
+	ret := *a
+	ret.Optional = true
+	ret.Required = false
+	return &ret
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/schema.go b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go
similarity index 75%
rename from vendor/github.com/hashicorp/terraform/config/configschema/schema.go
rename to vendor/github.com/hashicorp/terraform/configs/configschema/schema.go
index 9a8ee550aa..5a67334d47 100644
--- a/vendor/github.com/hashicorp/terraform/config/configschema/schema.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go
@@ -28,6 +28,12 @@ type Attribute struct {
 	// Type is a type specification that the attribute's value must conform to.
 	Type cty.Type
 
+	// Description is an English-language description of the purpose and
+	// usage of the attribute. A description should be concise and use only
+	// one or two sentences, leaving full definition to longer-form
+	// documentation defined elsewhere.
+	Description string
+
 	// Required, if set to true, specifies that an omitted or null value is
 	// not permitted.
 	Required bool
@@ -87,6 +93,23 @@ const (
 	// provided directly as an object value.
 	NestingSingle
 
+	// NestingGroup is similar to NestingSingle in that it calls for only a
+	// single instance of a given block type with no labels, but it additionally
+	// guarantees that its result will never be null, even if the block is
+	// absent, and instead the nested attributes and blocks will be treated
+	// as absent in that case. (Any required attributes or blocks within the
+	// nested block are not enforced unless the block is explicitly present
+	// in the configuration, so they are all effectively optional when the
+	// block is not present.)
+	//
+	// This is useful for the situation where a remote API has a feature that
+	// is always enabled but has a group of settings related to that feature
+	// that themselves have default values. By using NestingGroup instead of
+	// NestingSingle in that case, generated plans will show the block as
+	// present even when not present in configuration, thus allowing any
+	// default values within to be displayed to the user.
+	NestingGroup
+
 	// NestingList indicates that multiple blocks of the given type are
 	// permitted, with no labels, and that their corresponding objects should
 	// be provided in a list.
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go
new file mode 100644
index 0000000000..a41e930bae
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go
@@ -0,0 +1,173 @@
+package configschema
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/helper/didyoumean"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// StaticValidateTraversal checks whether the given traversal (which must be
+// relative) refers to a construct in the receiving schema, returning error
+// diagnostics if any problems are found.
+//
+// This method is "optimistic" in that it will not return errors for possible
+// problems that cannot be detected statically. It is possible that a
+// traversal which passed static validation will still fail when evaluated.
+func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics {
+	if !traversal.IsRelative() {
+		panic("StaticValidateTraversal on absolute traversal")
+	}
+	if len(traversal) == 0 {
+		return nil
+	}
+
+	var diags tfdiags.Diagnostics
+
+	next := traversal[0]
+	after := traversal[1:]
+
+	var name string
+	switch step := next.(type) {
+	case hcl.TraverseAttr:
+		name = step.Name
+	case hcl.TraverseIndex:
+		// No other traversal step types are allowed directly at a block.
+		// If it looks like the user was trying to use index syntax to
+		// access an attribute then we'll produce a specialized message.
+		key := step.Key
+		if key.Type() == cty.String && key.IsKnown() && !key.IsNull() {
+			maybeName := key.AsString()
+			if hclsyntax.ValidIdentifier(maybeName) {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  `Invalid index operation`,
+					Detail:   fmt.Sprintf(`Only attribute access is allowed here. Did you mean to access attribute %q using the dot operator?`, maybeName),
+					Subject:  &step.SrcRange,
+				})
+				return diags
+			}
+		}
+		// If it looks like some other kind of index then we'll use a generic error.
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  `Invalid index operation`,
+			Detail:   `Only attribute access is allowed here, using the dot operator.`,
+			Subject:  &step.SrcRange,
+		})
+		return diags
+	default:
+		// No other traversal types should appear in a normal valid traversal,
+		// but we'll handle this with a generic error anyway to be robust.
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid operation`, + Detail: `Only attribute access is allowed here, using the dot operator.`, + Subject: next.SourceRange().Ptr(), + }) + return diags + } + + if attrS, exists := b.Attributes[name]; exists { + // For attribute validation we will just apply the rest of the + // traversal to an unknown value of the attribute type and pass + // through HCL's own errors, since we don't want to replicate all of + // HCL's type checking rules here. + val := cty.UnknownVal(attrS.Type) + _, hclDiags := after.TraverseRel(val) + diags = diags.Append(hclDiags) + return diags + } + + if blockS, exists := b.BlockTypes[name]; exists { + moreDiags := blockS.staticValidateTraversal(name, after) + diags = diags.Append(moreDiags) + return diags + } + + // If we get here then the name isn't valid at all. We'll collect up + // all of the names that _are_ valid to use as suggestions. + var suggestions []string + for name := range b.Attributes { + suggestions = append(suggestions, name) + } + for name := range b.BlockTypes { + suggestions = append(suggestions, name) + } + sort.Strings(suggestions) + suggestion := didyoumean.NameSuggestion(name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unsupported attribute`, + Detail: fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion), + Subject: next.SourceRange().Ptr(), + }) + + return diags +} + +func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics { + if b.Nesting == NestingSingle || b.Nesting == NestingGroup { + // Single blocks are easy: just pass right through. + return b.Block.StaticValidateTraversal(traversal) + } + + if len(traversal) == 0 { + // It's always valid to access a nested block's attribute directly. + return nil + } + + var diags tfdiags.Diagnostics + next := traversal[0] + after := traversal[1:] + + switch b.Nesting { + + case NestingSet: + // Can't traverse into a set at all, since it does not have any keys + // to index with. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Cannot index a set value`, + Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName), + Subject: next.SourceRange().Ptr(), + }) + return diags + + case NestingList: + if _, ok := next.(hcl.TraverseIndex); ok { + moreDiags := b.Block.StaticValidateTraversal(after) + diags = diags.Append(moreDiags) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid operation`, + Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName), + Subject: next.SourceRange().Ptr(), + }) + } + return diags + + case NestingMap: + // Both attribute and index steps are valid for maps, so we'll just + // pass through here and let normal evaluation catch an + // incorrectly-typed index key later, if present. + moreDiags := b.Block.StaticValidateTraversal(after) + diags = diags.Append(moreDiags) + return diags + + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. 
(Note that we handled NestingSingle separately
+		// back at the start of this function.)
+		return nil
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/depends_on.go b/vendor/github.com/hashicorp/terraform/configs/depends_on.go
new file mode 100644
index 0000000000..b1984768fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/depends_on.go
@@ -0,0 +1,23 @@
+package configs
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) {
+	var ret []hcl.Traversal
+	exprs, diags := hcl.ExprList(attr.Expr)
+
+	for _, expr := range exprs {
+		expr, shimDiags := shimTraversalInString(expr, false)
+		diags = append(diags, shimDiags...)
+
+		traversal, travDiags := hcl.AbsTraversalForExpr(expr)
+		diags = append(diags, travDiags...)
+		if len(traversal) != 0 {
+			ret = append(ret, traversal)
+		}
+	}
+
+	return ret, diags
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/doc.go b/vendor/github.com/hashicorp/terraform/configs/doc.go
new file mode 100644
index 0000000000..f01eb79f40
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/doc.go
@@ -0,0 +1,19 @@
+// Package configs contains types that represent Terraform configurations and
+// the different elements thereof.
+//
+// The functionality in this package can be used for some static analyses of
+// Terraform configurations, but this package generally exposes representations
+// of the configuration source code rather than the result of evaluating these
+// objects. The sibling package "lang" deals with evaluation of structures
+// and expressions in the configuration.
+//
+// Due to its close relationship with HCL, this package makes frequent use
+// of types from the HCL API, including raw HCL diagnostic messages. Such
+// diagnostics can be converted into Terraform-flavored diagnostics, if needed,
+// using functions in the sibling package tfdiags.
+//
+// The Parser type is the main entry-point into this package. The LoadConfigDir
+// method can be used to load a single module directory, and then a full
+// configuration (including any descendent modules) can be produced using
+// the top-level BuildConfig method.
+package configs
diff --git a/vendor/github.com/hashicorp/terraform/configs/module.go b/vendor/github.com/hashicorp/terraform/configs/module.go
new file mode 100644
index 0000000000..250f9d3451
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/module.go
@@ -0,0 +1,404 @@
+package configs
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+
+	"github.com/hashicorp/terraform/addrs"
+)
+
+// Module is a container for a set of configuration constructs that are
+// evaluated within a common namespace.
+type Module struct {
+	// SourceDir is the filesystem directory that the module was loaded from.
+	//
+	// This is populated automatically only for configurations loaded with
+	// LoadConfigDir. If the parser is using a virtual filesystem then the
+	// path here will be in terms of that virtual filesystem.
+	//
+	// Any other caller that constructs a module directly with NewModule may
+	// assign a suitable value to this attribute before using it for other
+	// purposes. It should be treated as immutable by all consumers of Module
+	// values.
+ SourceDir string + + CoreVersionConstraints []VersionConstraint + + Backend *Backend + ProviderConfigs map[string]*Provider + ProviderRequirements map[string][]VersionConstraint + + Variables map[string]*Variable + Locals map[string]*Local + Outputs map[string]*Output + + ModuleCalls map[string]*ModuleCall + + ManagedResources map[string]*Resource + DataResources map[string]*Resource +} + +// File describes the contents of a single configuration file. +// +// Individual files are not usually used alone, but rather combined together +// with other files (conventionally, those in the same directory) to produce +// a *Module, using NewModule. +// +// At the level of an individual file we represent directly the structural +// elements present in the file, without any attempt to detect conflicting +// declarations. A File object can therefore be used for some basic static +// analysis of individual elements, but must be built into a Module to detect +// duplicate declarations. +type File struct { + CoreVersionConstraints []VersionConstraint + + Backends []*Backend + ProviderConfigs []*Provider + ProviderRequirements []*ProviderRequirement + + Variables []*Variable + Locals []*Local + Outputs []*Output + + ModuleCalls []*ModuleCall + + ManagedResources []*Resource + DataResources []*Resource +} + +// NewModule takes a list of primary files and a list of override files and +// produces a *Module by combining the files together. +// +// If there are any conflicting declarations in the given files -- for example, +// if the same variable name is defined twice -- then the resulting module +// will be incomplete and error diagnostics will be returned. Careful static +// analysis of the returned Module is still possible in this case, but the +// module will probably not be semantically valid. +func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { + var diags hcl.Diagnostics + mod := &Module{ + ProviderConfigs: map[string]*Provider{}, + ProviderRequirements: map[string][]VersionConstraint{}, + Variables: map[string]*Variable{}, + Locals: map[string]*Local{}, + Outputs: map[string]*Output{}, + ModuleCalls: map[string]*ModuleCall{}, + ManagedResources: map[string]*Resource{}, + DataResources: map[string]*Resource{}, + } + + for _, file := range primaryFiles { + fileDiags := mod.appendFile(file) + diags = append(diags, fileDiags...) + } + + for _, file := range overrideFiles { + fileDiags := mod.mergeFile(file) + diags = append(diags, fileDiags...) + } + + return mod, diags +} + +// ResourceByAddr returns the configuration for the resource with the given +// address, or nil if there is no such resource. +func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { + key := addr.String() + switch addr.Mode { + case addrs.ManagedResourceMode: + return m.ManagedResources[key] + case addrs.DataResourceMode: + return m.DataResources[key] + default: + return nil + } +} + +func (m *Module) appendFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + for _, constraint := range file.CoreVersionConstraints { + // If there are any conflicting requirements then we'll catch them + // when we actually check these constraints. + m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) + } + + for _, b := range file.Backends { + if m.Backend != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("A module may have only one backend configuration. 
The backend was previously configured at %s.", m.Backend.DeclRange),
+				Subject:  &b.DeclRange,
+			})
+			continue
+		}
+		m.Backend = b
+	}
+
+	for _, pc := range file.ProviderConfigs {
+		key := pc.moduleUniqueKey()
+		if existing, exists := m.ProviderConfigs[key]; exists {
+			if existing.Alias == "" {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Duplicate provider configuration",
+					Detail:   fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange),
+					Subject:  &pc.DeclRange,
+				})
+			} else {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Duplicate provider configuration",
+					Detail:   fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange),
+					Subject:  &pc.DeclRange,
+				})
+			}
+			continue
+		}
+		m.ProviderConfigs[key] = pc
+	}
+
+	for _, reqd := range file.ProviderRequirements {
+		m.ProviderRequirements[reqd.Name] = append(m.ProviderRequirements[reqd.Name], reqd.Requirement)
+	}
+
+	for _, v := range file.Variables {
+		if existing, exists := m.Variables[v.Name]; exists {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Duplicate variable declaration",
+				Detail:   fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange),
+				Subject:  &v.DeclRange,
+			})
+		}
+		m.Variables[v.Name] = v
+	}
+
+	for _, l := range file.Locals {
+		if existing, exists := m.Locals[l.Name]; exists {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Duplicate local value definition",
+				Detail:   fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange),
+				Subject:  &l.DeclRange,
+			})
+		}
+		m.Locals[l.Name] = l
+	}
+
+	for _, o := range file.Outputs {
+		if existing, exists := m.Outputs[o.Name]; exists {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Duplicate output definition",
+				Detail:   fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange),
+				Subject:  &o.DeclRange,
+			})
+		}
+		m.Outputs[o.Name] = o
+	}
+
+	for _, mc := range file.ModuleCalls {
+		if existing, exists := m.ModuleCalls[mc.Name]; exists {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Duplicate module call",
+				Detail:   fmt.Sprintf("A module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange),
+				Subject:  &mc.DeclRange,
+			})
+		}
+		m.ModuleCalls[mc.Name] = mc
+	}
+
+	for _, r := range file.ManagedResources {
+		key := r.moduleUniqueKey()
+		if existing, exists := m.ManagedResources[key]; exists {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Duplicate resource %q configuration", existing.Type),
+				Detail:   fmt.Sprintf("A %s resource named %q was already declared at %s.
Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.ManagedResources[key] = r + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + if existing, exists := m.DataResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.DataResources[key] = r + } + + return diags +} + +func (m *Module) mergeFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + if len(file.CoreVersionConstraints) != 0 { + // This is a bit of a strange case for overriding since we normally + // would union together across multiple files anyway, but we'll + // allow it and have each override file clobber any existing list. + m.CoreVersionConstraints = nil + for _, constraint := range file.CoreVersionConstraints { + m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) + } + } + + if len(file.Backends) != 0 { + switch len(file.Backends) { + case 1: + m.Backend = file.Backends[0] + default: + // An override file with multiple backends is still invalid, even + // though it can override backends from _other_ files. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange), + Subject: &file.Backends[1].DeclRange, + }) + } + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + existing, exists := m.ProviderConfigs[key] + if pc.Alias == "" { + // We allow overriding a non-existing _default_ provider configuration + // because the user model is that an absent provider configuration + // implies an empty provider configuration, which is what the user + // is therefore overriding here. + if exists { + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) + } else { + m.ProviderConfigs[key] = pc + } + } else { + // For aliased providers, there must be a base configuration to + // override. This allows us to detect and report alias typos + // that might otherwise cause the override to not apply. + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base provider configuration for override", + Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), + Subject: &pc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) + } + } + + if len(file.ProviderRequirements) != 0 { + mergeProviderVersionConstraints(m.ProviderRequirements, file.ProviderRequirements) + } + + for _, v := range file.Variables { + existing, exists := m.Variables[v.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base variable declaration to override", + Detail: fmt.Sprintf("There is no variable named %q. 
An override file can only override a variable that was already declared in a primary configuration file.", v.Name), + Subject: &v.DeclRange, + }) + continue + } + mergeDiags := existing.merge(v) + diags = append(diags, mergeDiags...) + } + + for _, l := range file.Locals { + existing, exists := m.Locals[l.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base local value definition to override", + Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), + Subject: &l.DeclRange, + }) + continue + } + mergeDiags := existing.merge(l) + diags = append(diags, mergeDiags...) + } + + for _, o := range file.Outputs { + existing, exists := m.Outputs[o.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base output definition to override", + Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name), + Subject: &o.DeclRange, + }) + continue + } + mergeDiags := existing.merge(o) + diags = append(diags, mergeDiags...) + } + + for _, mc := range file.ModuleCalls { + existing, exists := m.ModuleCalls[mc.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing module call to override", + Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), + Subject: &mc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(mc) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + existing, exists := m.ManagedResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing resource to override", + Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + existing, exists := m.DataResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing data resource to override", + Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r) + diags = append(diags, mergeDiags...) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_call.go b/vendor/github.com/hashicorp/terraform/configs/module_call.go new file mode 100644 index 0000000000..8c3ba67ce6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module_call.go @@ -0,0 +1,188 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +// ModuleCall represents a "module" block in a module or file. 
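+//
+// For example (an illustrative configuration, not part of this change), a
+// block like the following would decode into a ModuleCall with Name
+// "network", SourceAddr and Version set, and one PassedProviderConfig:
+//
+//	module "network" {
+//	  source  = "example.com/acme/network/aws" // hypothetical registry source
+//	  version = ">= 1.0.0"
+//	  providers = {
+//	    aws = aws.primary
+//	  }
+//	}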
+type ModuleCall struct { + Name string + + SourceAddr string + SourceAddrRange hcl.Range + SourceSet bool + + Config hcl.Body + + Version VersionConstraint + + Count hcl.Expression + ForEach hcl.Expression + + Providers []PassedProviderConfig + + DependsOn []hcl.Traversal + + DeclRange hcl.Range +} + +func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) { + mc := &ModuleCall{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + schema := moduleBlockSchema + if override { + schema = schemaForOverrides(schema) + } + + content, remain, diags := block.Body.PartialContent(schema) + mc.Config = remain + + if !hclsyntax.ValidIdentifier(mc.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + if attr, exists := content.Attributes["source"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr) + diags = append(diags, valDiags...) + mc.SourceAddrRange = attr.Expr.Range() + mc.SourceSet = true + } + + if attr, exists := content.Attributes["version"]; exists { + var versionDiags hcl.Diagnostics + mc.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) + } + + if attr, exists := content.Attributes["count"]; exists { + mc.Count = attr.Expr + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["for_each"]; exists { + mc.ForEach = attr.Expr + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + mc.DependsOn = append(mc.DependsOn, deps...) + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["providers"]; exists { + seen := make(map[string]hcl.Range) + pairs, pDiags := hcl.ExprMap(attr.Expr) + diags = append(diags, pDiags...) + for _, pair := range pairs { + key, keyDiags := decodeProviderConfigRef(pair.Key, "providers") + diags = append(diags, keyDiags...) + value, valueDiags := decodeProviderConfigRef(pair.Value, "providers") + diags = append(diags, valueDiags...) + if keyDiags.HasErrors() || valueDiags.HasErrors() { + continue + } + + matchKey := key.String() + if prev, exists := seen[matchKey]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider address", + Detail: fmt.Sprintf("A provider configuration was already passed to %s at %s. 
Each child provider configuration can be assigned only once.", matchKey, prev), + Subject: pair.Value.Range().Ptr(), + }) + continue + } + + rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range()) + seen[matchKey] = rng + mc.Providers = append(mc.Providers, PassedProviderConfig{ + InChild: key, + InParent: value, + }) + } + } + + // Reserved block types (all of them) + for _, block := range content.Blocks { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in module block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + + return mc, diags +} + +// PassedProviderConfig represents a provider config explicitly passed down to +// a child module, possibly giving it a new local address in the process. +type PassedProviderConfig struct { + InChild *ProviderConfigRef + InParent *ProviderConfigRef +} + +var moduleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "source", + Required: true, + }, + { + Name: "version", + }, + { + Name: "count", + }, + { + Name: "for_each", + }, + { + Name: "depends_on", + }, + { + Name: "providers", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + // These are all reserved for future use. + {Type: "lifecycle"}, + {Type: "locals"}, + {Type: "provider", LabelNames: []string{"type"}}, + }, +} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge.go b/vendor/github.com/hashicorp/terraform/configs/module_merge.go new file mode 100644 index 0000000000..12614c1d60 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module_merge.go @@ -0,0 +1,247 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// The methods in this file are used by Module.mergeFile to apply overrides +// to our different configuration elements. These methods all follow the +// pattern of mutating the receiver to incorporate settings from the parameter, +// returning error diagnostics if any aspect of the parameter cannot be merged +// into the receiver for some reason. +// +// User expectation is that anything _explicitly_ set in the given object +// should take precedence over the corresponding settings in the receiver, +// but that anything omitted in the given object should be left unchanged. +// In some cases it may be reasonable to do a "deep merge" of certain nested +// features, if it is possible to unambiguously correlate the nested elements +// and their behaviors are orthogonal to each other. + +func (p *Provider) merge(op *Provider) hcl.Diagnostics { + var diags hcl.Diagnostics + + if op.Version.Required != nil { + p.Version = op.Version + } + + p.Config = MergeBodies(p.Config, op.Config) + + return diags +} + +func mergeProviderVersionConstraints(recv map[string][]VersionConstraint, ovrd []*ProviderRequirement) { + // Any provider name that's mentioned in the override gets nilled out in + // our map so that we'll rebuild it below. Any provider not mentioned is + // left unchanged. 
+	for _, reqd := range ovrd {
+		delete(recv, reqd.Name)
+	}
+	for _, reqd := range ovrd {
+		recv[reqd.Name] = append(recv[reqd.Name], reqd.Requirement)
+	}
+}
+
+func (v *Variable) merge(ov *Variable) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	if ov.DescriptionSet {
+		v.Description = ov.Description
+		v.DescriptionSet = ov.DescriptionSet
+	}
+	if ov.Default != cty.NilVal {
+		v.Default = ov.Default
+	}
+	if ov.Type != cty.NilType {
+		v.Type = ov.Type
+	}
+	if ov.ParsingMode != 0 {
+		v.ParsingMode = ov.ParsingMode
+	}
+
+	// If the override file overrode type without default or vice-versa then
+	// it may have created an invalid situation, which we'll catch now by
+	// attempting to re-convert the value.
+	//
+	// Note that here we may be re-converting an already-converted base value
+	// from the base config. This will be a no-op if the type was not changed,
+	// but in particular might be user-observable in the edge case where the
+	// literal value in config could've been converted to the overridden type
+	// constraint but the converted value cannot. In practice, this situation
+	// should be rare since most of our conversions are interchangeable.
+	if v.Default != cty.NilVal {
+		val, err := convert.Convert(v.Default, v.Type)
+		if err != nil {
+			// What exactly we'll say in the error message here depends on
+			// whether it was Default or Type that was overridden.
+			switch {
+			case ov.Type != cty.NilType && ov.Default == cty.NilVal:
+				// If only the type was overridden
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid default value for variable",
+					Detail:   fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err),
+					Subject:  &ov.DeclRange,
+				})
+			case ov.Type == cty.NilType && ov.Default != cty.NilVal:
+				// Only the default was overridden
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid default value for variable",
+					Detail:   fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err),
+					Subject:  &ov.DeclRange,
+				})
+			default:
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid default value for variable",
+					Detail:   fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err),
+					Subject:  &ov.DeclRange,
+				})
+			}
+		} else {
+			v.Default = val
+		}
+	}
+
+	return diags
+}
+
+func (l *Local) merge(ol *Local) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	// Since a local is just a single expression in configuration, the
+	// override definition entirely replaces the base definition, including
+	// the source range so that we'll send the user to the right place if
+	// there is an error.
+	l.Expr = ol.Expr
+	l.DeclRange = ol.DeclRange
+
+	return diags
+}
+
+func (o *Output) merge(oo *Output) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	if oo.Description != "" {
+		o.Description = oo.Description
+	}
+	if oo.Expr != nil {
+		o.Expr = oo.Expr
+	}
+	if oo.SensitiveSet {
+		o.Sensitive = oo.Sensitive
+		o.SensitiveSet = oo.SensitiveSet
+	}
+
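// Editor's illustrative sketch (not part of the vendored file): the
// re-conversion check above relies on cty's convert package, which
// succeeds or fails roughly like this.
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// A string default can convert to an overridden number constraint...
	v, err := convert.Convert(cty.StringVal("5"), cty.Number)
	fmt.Printf("%#v %v\n", v, err) // cty.NumberIntVal(5) <nil>

	// ...but not every value can, which is the case diagnosed above.
	_, err = convert.Convert(cty.StringVal("hello"), cty.Number)
	fmt.Println(err != nil) // true: this conversion error becomes the diagnostic
}

+	// We don't allow depends_on to be overridden because that is likely to
+	// cause confusing misbehavior.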
+ if len(oo.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} + +func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { + var diags hcl.Diagnostics + + if omc.SourceSet { + mc.SourceAddr = omc.SourceAddr + mc.SourceAddrRange = omc.SourceAddrRange + mc.SourceSet = omc.SourceSet + } + + if omc.Count != nil { + mc.Count = omc.Count + } + + if omc.ForEach != nil { + mc.ForEach = omc.ForEach + } + + if len(omc.Version.Required) != 0 { + mc.Version = omc.Version + } + + mc.Config = MergeBodies(mc.Config, omc.Config) + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. + if len(mc.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: mc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} + +func (r *Resource) merge(or *Resource) hcl.Diagnostics { + var diags hcl.Diagnostics + + if r.Mode != or.Mode { + // This is always a programming error, since managed and data resources + // are kept in separate maps in the configuration structures. + panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode)) + } + + if or.Count != nil { + r.Count = or.Count + } + if or.ForEach != nil { + r.ForEach = or.ForEach + } + if or.ProviderConfigRef != nil { + r.ProviderConfigRef = or.ProviderConfigRef + } + if r.Mode == addrs.ManagedResourceMode { + // or.Managed is always non-nil for managed resource mode + + if or.Managed.Connection != nil { + r.Managed.Connection = or.Managed.Connection + } + if or.Managed.CreateBeforeDestroySet { + r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy + r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet + } + if len(or.Managed.IgnoreChanges) != 0 { + r.Managed.IgnoreChanges = or.Managed.IgnoreChanges + } + if or.Managed.PreventDestroySet { + r.Managed.PreventDestroy = or.Managed.PreventDestroy + r.Managed.PreventDestroySet = or.Managed.PreventDestroySet + } + if len(or.Managed.Provisioners) != 0 { + r.Managed.Provisioners = or.Managed.Provisioners + } + } + + r.Config = MergeBodies(r.Config, or.Config) + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. + if len(or.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go new file mode 100644 index 0000000000..0ed561eeee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go @@ -0,0 +1,143 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// MergeBodies creates a new HCL body that contains a combination of the +// given base and override bodies. Attributes and blocks defined in the +// override body take precedence over those of the same name defined in +// the base body. 
+// +// If any block of a particular type appears in "override" then it will +// replace _all_ of the blocks of the same type in "base" in the new +// body. +func MergeBodies(base, override hcl.Body) hcl.Body { + return mergeBody{ + Base: base, + Override: override, + } +} + +// mergeBody is a hcl.Body implementation that wraps a pair of other bodies +// and allows attributes and blocks within the override to take precedence +// over those defined in the base body. +// +// This is used to deal with dynamically-processed bodies in Module.mergeFile. +// It uses a shallow-only merging strategy where direct attributes defined +// in Override will override attributes of the same name in Base, while any +// blocks defined in Override will hide all blocks of the same type in Base. +// +// This cannot possibly "do the right thing" in all cases, because we don't +// have enough information about user intent. However, this behavior is intended +// to be reasonable for simple overriding use-cases. +type mergeBody struct { + Base hcl.Body + Override hcl.Body +} + +var _ hcl.Body = mergeBody{} + +func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + var diags hcl.Diagnostics + baseSchema := schemaWithDynamic(schema) + overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) + + baseContent, _, cDiags := b.Base.PartialContent(baseSchema) + diags = append(diags, cDiags...) + overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema) + diags = append(diags, cDiags...) + + content := b.prepareContent(baseContent, overrideContent) + + return content, diags +} + +func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + baseSchema := schemaWithDynamic(schema) + overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) + + baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema) + diags = append(diags, cDiags...) + overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema) + diags = append(diags, cDiags...) + + content := b.prepareContent(baseContent, overrideContent) + + remain := MergeBodies(baseRemain, overrideRemain) + + return content, remain, diags +} + +func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent { + content := &hcl.BodyContent{ + Attributes: make(hcl.Attributes), + } + + // For attributes we just assign from each map in turn and let the override + // map clobber any matching entries from base. + for k, a := range base.Attributes { + content.Attributes[k] = a + } + for k, a := range override.Attributes { + content.Attributes[k] = a + } + + // Things are a little more interesting for blocks because they arrive + // as a flat list. Our merging semantics call for us to suppress blocks + // from base if at least one block of the same type appears in override. + // We explicitly do not try to correlate and deeply merge nested blocks, + // since we don't have enough context here to infer user intent. 
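// Editor's illustrative sketch (not part of the vendored file): the shallow
// merge in action for attributes (override clobbers base), using two tiny
// native-syntax bodies and the vendored import paths.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/hashicorp/terraform/configs"
)

func mustParse(src, name string) hcl.Body {
	f, diags := hclsyntax.ParseConfig([]byte(src), name, hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags)
	}
	return f.Body
}

func main() {
	base := mustParse("ami = \"ami-base\"\ninstance_type = \"t2.micro\"\n", "base.tf")
	override := mustParse("ami = \"ami-override\"\n", "override.tf")

	merged := configs.MergeBodies(base, override)
	attrs, _ := merged.JustAttributes()
	for name, attr := range attrs {
		val, _ := attr.Expr.Value(nil)
		// Prints ami = ami-override and instance_type = t2.micro:
		// overridden attributes win, unmentioned ones pass through.
		fmt.Println(name, "=", val.AsString())
	}
}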
+ + overriddenBlockTypes := make(map[string]bool) + for _, block := range override.Blocks { + if block.Type == "dynamic" { + overriddenBlockTypes[block.Labels[0]] = true + continue + } + overriddenBlockTypes[block.Type] = true + } + for _, block := range base.Blocks { + // We skip over dynamic blocks whose type label is an overridden type + // but note that below we do still leave them as dynamic blocks in + // the result because expanding the dynamic blocks that are left is + // done much later during the core graph walks, where we can safely + // evaluate the expressions. + if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] { + continue + } + if overriddenBlockTypes[block.Type] { + continue + } + content.Blocks = append(content.Blocks, block) + } + for _, block := range override.Blocks { + content.Blocks = append(content.Blocks, block) + } + + return content +} + +func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := make(hcl.Attributes) + + baseAttrs, aDiags := b.Base.JustAttributes() + diags = append(diags, aDiags...) + overrideAttrs, aDiags := b.Override.JustAttributes() + diags = append(diags, aDiags...) + + for k, a := range baseAttrs { + ret[k] = a + } + for k, a := range overrideAttrs { + ret[k] = a + } + + return ret, diags +} + +func (b mergeBody) MissingItemRange() hcl.Range { + return b.Base.MissingItemRange() +} diff --git a/vendor/github.com/hashicorp/terraform/configs/named_values.go b/vendor/github.com/hashicorp/terraform/configs/named_values.go new file mode 100644 index 0000000000..6f6b469fd7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/named_values.go @@ -0,0 +1,364 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/ext/typeexpr" + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform/addrs" +) + +// A consistent detail message for all "not a valid identifier" diagnostics. +const badIdentifierDetail = "A name must start with a letter and may contain only letters, digits, underscores, and dashes." + +// Variable represents a "variable" block in a module or file. +type Variable struct { + Name string + Description string + Default cty.Value + Type cty.Type + ParsingMode VariableParsingMode + + DescriptionSet bool + + DeclRange hcl.Range +} + +func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) { + v := &Variable{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + // Unless we're building an override, we'll set some defaults + // which we might override with attributes below. We leave these + // as zero-value in the override case so we can recognize whether + // or not they are set when we merge. + if !override { + v.Type = cty.DynamicPseudoType + v.ParsingMode = VariableParseLiteral + } + + content, diags := block.Body.Content(variableBlockSchema) + + if !hclsyntax.ValidIdentifier(v.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + // Don't allow declaration of variables that would conflict with the + // reserved attribute and block type names in a "module" block, since + // these won't be usable for child modules. 
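// Editor's illustrative sketch (not part of the vendored file):
// hclsyntax.ValidIdentifier, used by the decoders above and below,
// enforces the rule stated in badIdentifierDetail.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	fmt.Println(hclsyntax.ValidIdentifier("instance_count")) // true
	fmt.Println(hclsyntax.ValidIdentifier("instance-count")) // true: dashes are allowed
	fmt.Println(hclsyntax.ValidIdentifier("0count"))         // false: must not start with a digit
	fmt.Println(hclsyntax.ValidIdentifier("count two"))      // false: no spaces
}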
+	for _, attr := range moduleBlockSchema.Attributes {
+		if attr.Name == v.Name {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid variable name",
+				Detail:   fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name),
+				Subject:  &block.LabelRanges[0],
+			})
+		}
+	}
+	for _, blockS := range moduleBlockSchema.Blocks {
+		if blockS.Type == v.Name {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid variable name",
+				Detail:   fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type),
+				Subject:  &block.LabelRanges[0],
+			})
+		}
+	}
+
+	if attr, exists := content.Attributes["description"]; exists {
+		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description)
+		diags = append(diags, valDiags...)
+		v.DescriptionSet = true
+	}
+
+	if attr, exists := content.Attributes["type"]; exists {
+		ty, parseMode, tyDiags := decodeVariableType(attr.Expr)
+		diags = append(diags, tyDiags...)
+		v.Type = ty
+		v.ParsingMode = parseMode
+	}
+
+	if attr, exists := content.Attributes["default"]; exists {
+		val, valDiags := attr.Expr.Value(nil)
+		diags = append(diags, valDiags...)
+
+		// Convert the default to the expected type so we can catch invalid
+		// defaults early and allow later code to assume validity.
+		// Note that this depends on us having already processed any "type"
+		// attribute above.
+		// However, we can't do this if we're in an override file where
+		// the type might not be set; we'll catch that during merge.
+		if v.Type != cty.NilType {
+			var err error
+			val, err = convert.Convert(val, v.Type)
+			if err != nil {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid default value for variable",
+					Detail:   fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err),
+					Subject:  attr.Expr.Range().Ptr(),
+				})
+				val = cty.DynamicVal
+			}
+		}
+
+		v.Default = val
+	}
+
+	return v, diags
+}
+
+func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) {
+	if exprIsNativeQuotedString(expr) {
+		// Here we're accepting the pre-0.12 form of variable type argument where
+		// the string values "string", "list" and "map" are accepted as a hint
+		// about the type, used primarily for deciding how to parse values
+		// given on the command line and in environment variables.
+		// Only the native syntax ends up in this codepath; we handle the
+		// JSON syntax (which is, of course, quoted even in the new format)
+		// in the normal codepath below.
+		val, diags := expr.Value(nil)
+		if diags.HasErrors() {
+			return cty.DynamicPseudoType, VariableParseHCL, diags
+		}
+		str := val.AsString()
+		switch str {
+		case "string":
+			return cty.String, VariableParseLiteral, diags
+		case "list":
+			return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags
+		case "map":
+			return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags
+		default:
+			return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid legacy variable type hint",
+				Detail:   `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`,
+				Subject:  expr.Range().Ptr(),
+			}}
+		}
+	}
+
+	// First we'll deal with some shorthand forms that the HCL-level type
+	// expression parser doesn't include. These both emulate pre-0.12 behavior
+	// of allowing a list or map of any element type as long as all of the
+	// elements are consistent. This is the same as list(any) or map(any).
+	switch hcl.ExprAsKeyword(expr) {
+	case "list":
+		return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil
+	case "map":
+		return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil
+	}
+
+	ty, diags := typeexpr.TypeConstraint(expr)
+	if diags.HasErrors() {
+		return cty.DynamicPseudoType, VariableParseHCL, diags
+	}
+
+	switch {
+	case ty.IsPrimitiveType():
+		// Primitive types use literal parsing.
+		return ty, VariableParseLiteral, diags
+	default:
+		// Everything else uses HCL parsing
+		return ty, VariableParseHCL, diags
+	}
+}
+
+// VariableParsingMode defines how values of a particular variable given by
+// text-only mechanisms (command line arguments and environment variables)
+// should be parsed to produce the final value.
+type VariableParsingMode rune
+
+// VariableParseLiteral is a variable parsing mode that just takes the given
+// string directly as a cty.String value.
+const VariableParseLiteral VariableParsingMode = 'L'
+
+// VariableParseHCL is a variable parsing mode that attempts to parse the given
+// string as an HCL expression and returns the result.
+const VariableParseHCL VariableParsingMode = 'H'
+
+// Parse uses the receiving parsing mode to process the given variable value
+// string, returning the result along with any diagnostics.
+//
+// A VariableParsingMode does not know the expected type of the corresponding
+// variable, so it's the caller's responsibility to attempt to convert the
+// result to the appropriate type and return to the user any diagnostics that
+// conversion may produce.
+//
+// The given name is used to create a synthetic filename in case any diagnostics
+// must be generated about the given string value. This should be the name
+// of the root module variable whose value will be populated from the given
+// string.
+//
+// If the returned diagnostics has errors, the returned value may not be
+// valid.
+func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) {
+	switch m {
+	case VariableParseLiteral:
+		return cty.StringVal(value), nil
+	case VariableParseHCL:
+		fakeFilename := fmt.Sprintf("<value for var.%s>", name)
+		expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1})
+		if diags.HasErrors() {
+			return cty.DynamicVal, diags
+		}
+		val, valDiags := expr.Value(nil)
+		diags = append(diags, valDiags...)
+		return val, diags
+	default:
+		// Should never happen
+		panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m))
+	}
+}
+
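// Editor's illustrative sketch (not part of the vendored file): the two
// parsing modes in action for a root module variable value supplied on
// the command line.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs"
)

func main() {
	// Literal mode: the string is the value.
	v, _ := configs.VariableParseLiteral.Parse("region", "us-west-2")
	fmt.Printf("%#v\n", v) // cty.StringVal("us-west-2")

	// HCL mode: the string is parsed as an expression first.
	v, diags := configs.VariableParseHCL.Parse("ports", "[8080, 8443]")
	fmt.Printf("%#v %v\n", v, diags.HasErrors()) // a cty tuple value, false
}

+// Output represents an "output" block in a module or file.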
+type Output struct { + Name string + Description string + Expr hcl.Expression + DependsOn []hcl.Traversal + Sensitive bool + + DescriptionSet bool + SensitiveSet bool + + DeclRange hcl.Range +} + +func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) { + o := &Output{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + schema := outputBlockSchema + if override { + schema = schemaForOverrides(schema) + } + + content, diags := block.Body.Content(schema) + + if !hclsyntax.ValidIdentifier(o.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid output name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + if attr, exists := content.Attributes["description"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description) + diags = append(diags, valDiags...) + o.DescriptionSet = true + } + + if attr, exists := content.Attributes["value"]; exists { + o.Expr = attr.Expr + } + + if attr, exists := content.Attributes["sensitive"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive) + diags = append(diags, valDiags...) + o.SensitiveSet = true + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + o.DependsOn = append(o.DependsOn, deps...) + } + + return o, diags +} + +// Local represents a single entry from a "locals" block in a module or file. +// The "locals" block itself is not represented, because it serves only to +// provide context for us to interpret its contents. +type Local struct { + Name string + Expr hcl.Expression + + DeclRange hcl.Range +} + +func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) { + attrs, diags := block.Body.JustAttributes() + if len(attrs) == 0 { + return nil, diags + } + + locals := make([]*Local, 0, len(attrs)) + for name, attr := range attrs { + if !hclsyntax.ValidIdentifier(name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid local value name", + Detail: badIdentifierDetail, + Subject: &attr.NameRange, + }) + } + + locals = append(locals, &Local{ + Name: name, + Expr: attr.Expr, + DeclRange: attr.Range, + }) + } + return locals, diags +} + +// Addr returns the address of the local value declared by the receiver, +// relative to its containing module. +func (l *Local) Addr() addrs.LocalValue { + return addrs.LocalValue{ + Name: l.Name, + } +} + +var variableBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + { + Name: "default", + }, + { + Name: "type", + }, + }, +} + +var outputBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + { + Name: "value", + Required: true, + }, + { + Name: "depends_on", + }, + { + Name: "sensitive", + }, + }, +} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser.go b/vendor/github.com/hashicorp/terraform/configs/parser.go new file mode 100644 index 0000000000..8176fa1b77 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/parser.go @@ -0,0 +1,100 @@ +package configs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hclparse" + "github.com/spf13/afero" +) + +// Parser is the main interface to read configuration files and other related +// files from disk. 
+// +// It retains a cache of all files that are loaded so that they can be used +// to create source code snippets in diagnostics, etc. +type Parser struct { + fs afero.Afero + p *hclparse.Parser +} + +// NewParser creates and returns a new Parser that reads files from the given +// filesystem. If a nil filesystem is passed then the system's "real" filesystem +// will be used, via afero.OsFs. +func NewParser(fs afero.Fs) *Parser { + if fs == nil { + fs = afero.OsFs{} + } + + return &Parser{ + fs: afero.Afero{Fs: fs}, + p: hclparse.NewParser(), + } +} + +// LoadHCLFile is a low-level method that reads the file at the given path, +// parses it, and returns the hcl.Body representing its root. In many cases +// it is better to use one of the other Load*File methods on this type, +// which additionally decode the root body in some way and return a higher-level +// construct. +// +// If the file cannot be read at all -- e.g. because it does not exist -- then +// this method will return a nil body and error diagnostics. In this case +// callers may wish to ignore the provided error diagnostics and produce +// a more context-sensitive error instead. +// +// The file will be parsed using the HCL native syntax unless the filename +// ends with ".json", in which case the HCL JSON syntax will be used. +func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) { + src, err := p.fs.ReadFile(path) + + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to read file", + Detail: fmt.Sprintf("The file %q could not be read.", path), + }, + } + } + + var file *hcl.File + var diags hcl.Diagnostics + switch { + case strings.HasSuffix(path, ".json"): + file, diags = p.p.ParseJSON(src, path) + default: + file, diags = p.p.ParseHCL(src, path) + } + + // If the returned file or body is nil, then we'll return a non-nil empty + // body so we'll meet our contract that nil means an error reading the file. + if file == nil || file.Body == nil { + return hcl.EmptyBody(), diags + } + + return file.Body, diags +} + +// Sources returns a map of the cached source buffers for all files that +// have been loaded through this parser, with source filenames (as requested +// when each file was opened) as the keys. +func (p *Parser) Sources() map[string][]byte { + return p.p.Sources() +} + +// ForceFileSource artificially adds source code to the cache of file sources, +// as if it had been loaded from the given filename. +// +// This should be used only in special situations where configuration is loaded +// some other way. Most callers should load configuration via methods of +// Parser, which will update the sources cache automatically. +func (p *Parser) ForceFileSource(filename string, src []byte) { + // We'll make a synthetic hcl.File here just so we can reuse the + // existing cache. + p.p.AddFile(filename, &hcl.File{ + Body: hcl.EmptyBody(), + Bytes: src, + }) +} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config.go b/vendor/github.com/hashicorp/terraform/configs/parser_config.go new file mode 100644 index 0000000000..7f2ff27142 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/parser_config.go @@ -0,0 +1,247 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// LoadConfigFile reads the file at the given path and parses it as a config +// file. +// +// If the file cannot be read -- for example, if it does not exist -- then +// a nil *File will be returned along with error diagnostics. 
Callers may wish
+// to disregard the returned diagnostics in this case and instead generate
+// their own error message(s) with additional context.
+//
+// If the returned diagnostics has errors when a non-nil *File is returned
+// then the file may be incomplete but should be valid enough for careful
+// static analysis.
+//
+// This method wraps LoadHCLFile, and so it inherits the syntax selection
+// behaviors documented for that method.
+func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) {
+	return p.loadConfigFile(path, false)
+}
+
+// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes
+// certain required attribute constraints in order to interpret the given
+// file as an overrides file.
+func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) {
+	return p.loadConfigFile(path, true)
+}
+
+func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) {
+
+	body, diags := p.LoadHCLFile(path)
+	if body == nil {
+		return nil, diags
+	}
+
+	file := &File{}
+
+	var reqDiags hcl.Diagnostics
+	file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body)
+	diags = append(diags, reqDiags...)
+
+	content, contentDiags := body.Content(configFileSchema)
+	diags = append(diags, contentDiags...)
+
+	for _, block := range content.Blocks {
+		switch block.Type {
+
+		case "terraform":
+			content, contentDiags := block.Body.Content(terraformBlockSchema)
+			diags = append(diags, contentDiags...)
+
+			// We ignore the "required_version" attribute here because
+			// sniffCoreVersionRequirements already dealt with that above.
+
+			for _, innerBlock := range content.Blocks {
+				switch innerBlock.Type {
+
+				case "backend":
+					backendCfg, cfgDiags := decodeBackendBlock(innerBlock)
+					diags = append(diags, cfgDiags...)
+					if backendCfg != nil {
+						file.Backends = append(file.Backends, backendCfg)
+					}
+
+				case "required_providers":
+					reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock)
+					diags = append(diags, reqsDiags...)
+					file.ProviderRequirements = append(file.ProviderRequirements, reqs...)
+
+				default:
+					// Should never happen because the above cases should be exhaustive
+					// for all block type names in our schema.
+					continue
+
+				}
+			}
+
+		case "provider":
+			cfg, cfgDiags := decodeProviderBlock(block)
+			diags = append(diags, cfgDiags...)
+			if cfg != nil {
+				file.ProviderConfigs = append(file.ProviderConfigs, cfg)
+			}
+
+		case "variable":
+			cfg, cfgDiags := decodeVariableBlock(block, override)
+			diags = append(diags, cfgDiags...)
+			if cfg != nil {
+				file.Variables = append(file.Variables, cfg)
+			}
+
+		case "locals":
+			defs, defsDiags := decodeLocalsBlock(block)
+			diags = append(diags, defsDiags...)
+			file.Locals = append(file.Locals, defs...)
+
+		case "output":
+			cfg, cfgDiags := decodeOutputBlock(block, override)
+			diags = append(diags, cfgDiags...)
+			if cfg != nil {
+				file.Outputs = append(file.Outputs, cfg)
+			}
+
+		case "module":
+			cfg, cfgDiags := decodeModuleBlock(block, override)
+			diags = append(diags, cfgDiags...)
+			if cfg != nil {
+				file.ModuleCalls = append(file.ModuleCalls, cfg)
+			}
+
+		case "resource":
+			cfg, cfgDiags := decodeResourceBlock(block)
+			diags = append(diags, cfgDiags...)
+			if cfg != nil {
+				file.ManagedResources = append(file.ManagedResources, cfg)
+			}
+
+		case "data":
+			cfg, cfgDiags := decodeDataBlock(block)
+			diags = append(diags, cfgDiags...)
+ if cfg != nil { + file.DataResources = append(file.DataResources, cfg) + } + + default: + // Should never happen because the above cases should be exhaustive + // for all block type names in our schema. + continue + + } + } + + return file, diags +} + +// sniffCoreVersionRequirements does minimal parsing of the given body for +// "terraform" blocks with "required_version" attributes, returning the +// requirements found. +// +// This is intended to maximize the chance that we'll be able to read the +// requirements (syntax errors notwithstanding) even if the config file contains +// constructs that might've been added in future Terraform versions +// +// This is a "best effort" sort of method which will return constraints it is +// able to find, but may return no constraints at all if the given body is +// so invalid that it cannot be decoded at all. +func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) { + rootContent, _, diags := body.PartialContent(configFileVersionSniffRootSchema) + + var constraints []VersionConstraint + + for _, block := range rootContent.Blocks { + content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema) + diags = append(diags, blockDiags...) + + attr, exists := content.Attributes["required_version"] + if !exists { + continue + } + + constraint, constraintDiags := decodeVersionConstraint(attr) + diags = append(diags, constraintDiags...) + if !constraintDiags.HasErrors() { + constraints = append(constraints, constraint) + } + } + + return constraints, diags +} + +// configFileSchema is the schema for the top-level of a config file. We use +// the low-level HCL API for this level so we can easily deal with each +// block type separately with its own decoding logic. +var configFileSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + }, + { + Type: "provider", + LabelNames: []string{"name"}, + }, + { + Type: "variable", + LabelNames: []string{"name"}, + }, + { + Type: "locals", + }, + { + Type: "output", + LabelNames: []string{"name"}, + }, + { + Type: "module", + LabelNames: []string{"name"}, + }, + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "data", + LabelNames: []string{"type", "name"}, + }, + }, +} + +// terraformBlockSchema is the schema for a top-level "terraform" block in +// a configuration file. 
+var terraformBlockSchema = &hcl.BodySchema{
+	Attributes: []hcl.AttributeSchema{
+		{
+			Name: "required_version",
+		},
+	},
+	Blocks: []hcl.BlockHeaderSchema{
+		{
+			Type:       "backend",
+			LabelNames: []string{"type"},
+		},
+		{
+			Type: "required_providers",
+		},
+	},
+}
+
+// configFileVersionSniffRootSchema is a schema for sniffCoreVersionRequirements
+var configFileVersionSniffRootSchema = &hcl.BodySchema{
+	Blocks: []hcl.BlockHeaderSchema{
+		{
+			Type: "terraform",
+		},
+	},
+}
+
+// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements
+var configFileVersionSniffBlockSchema = &hcl.BodySchema{
+	Attributes: []hcl.AttributeSchema{
+		{
+			Name: "required_version",
+		},
+	},
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
new file mode 100644
index 0000000000..3014cb4b41
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
@@ -0,0 +1,142 @@
+package configs
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// LoadConfigDir reads the .tf and .tf.json files in the given directory
+// as config files (using LoadConfigFile) and then combines these files into
+// a single Module.
+//
+// If this method returns nil, that indicates that the given directory does not
+// exist at all or could not be opened for some reason. Callers may wish to
+// detect this case and ignore the returned diagnostics so that they can
+// produce a more context-aware error message in that case.
+//
+// If this method returns a non-nil module while error diagnostics are returned
+// then the module may be incomplete but can be used carefully for static
+// analysis.
+//
+// This method does not consider a directory with no files to be an error, and
+// will simply return an empty module in that case. Callers should first call
+// Parser.IsConfigDir if they wish to recognize that situation.
+//
+// .tf files are parsed using the HCL native syntax while .tf.json files are
+// parsed using the HCL JSON syntax.
+func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) {
+	primaryPaths, overridePaths, diags := p.dirFiles(path)
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	primary, fDiags := p.loadFiles(primaryPaths, false)
+	diags = append(diags, fDiags...)
+	override, fDiags := p.loadFiles(overridePaths, true)
+	diags = append(diags, fDiags...)
+
+	mod, modDiags := NewModule(primary, override)
+	diags = append(diags, modDiags...)
+
+	mod.SourceDir = path
+
+	return mod, diags
+}
+
+// ConfigDirFiles returns lists of the primary and override configuration
+// files in the given directory.
+//
+// If the given directory does not exist or cannot be read, error diagnostics
+// are returned. If errors are returned, the resulting lists may be incomplete.
+func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) {
+	return p.dirFiles(dir)
+}
+
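// Editor's illustrative sketch (not part of the vendored file): loading a
// config directory through an in-memory afero filesystem, which NewParser
// accepts. Assumes the exported Module.ManagedResources field of this
// package.
package main

import (
	"fmt"

	"github.com/spf13/afero"

	"github.com/hashicorp/terraform/configs"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "mod/main.tf", []byte(`
resource "aws_instance" "web" {
  ami = "ami-12345"
}
`), 0644)

	p := configs.NewParser(fs)
	mod, diags := p.LoadConfigDir("mod")
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(len(mod.ManagedResources)) // 1
}

+// IsConfigDir determines whether the given path refers to a directory that
+// exists and contains at least one Terraform config file (with a .tf or
+// .tf.json extension.)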
+func (p *Parser) IsConfigDir(path string) bool {
+	primaryPaths, overridePaths, _ := p.dirFiles(path)
+	return (len(primaryPaths) + len(overridePaths)) > 0
+}
+
+func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) {
+	var files []*File
+	var diags hcl.Diagnostics
+
+	for _, path := range paths {
+		var f *File
+		var fDiags hcl.Diagnostics
+		if override {
+			f, fDiags = p.LoadConfigFileOverride(path)
+		} else {
+			f, fDiags = p.LoadConfigFile(path)
+		}
+		diags = append(diags, fDiags...)
+		if f != nil {
+			files = append(files, f)
+		}
+	}
+
+	return files, diags
+}
+
+func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) {
+	infos, err := p.fs.ReadDir(dir)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Failed to read module directory",
+			Detail:   fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir),
+		})
+		return
+	}
+
+	for _, info := range infos {
+		if info.IsDir() {
+			// We only care about files
+			continue
+		}
+
+		name := info.Name()
+		ext := fileExt(name)
+		if ext == "" || IsIgnoredFile(name) {
+			continue
+		}
+
+		baseName := name[:len(name)-len(ext)] // strip extension
+		isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override")
+
+		fullPath := filepath.Join(dir, name)
+		if isOverride {
+			override = append(override, fullPath)
+		} else {
+			primary = append(primary, fullPath)
+		}
+	}
+
+	return
+}
+
+// fileExt returns the Terraform configuration extension of the given
+// path, or a blank string if it is not a recognized extension.
+func fileExt(path string) string {
+	if strings.HasSuffix(path, ".tf") {
+		return ".tf"
+	} else if strings.HasSuffix(path, ".tf.json") {
+		return ".tf.json"
+	} else {
+		return ""
+	}
+}
+
+// IsIgnoredFile returns true if the given filename (which must not have a
+// directory path ahead of it) should be ignored as e.g. an editor swap file.
+func IsIgnoredFile(name string) bool {
+	return strings.HasPrefix(name, ".") || // Unix-like hidden files
+		strings.HasSuffix(name, "~") || // vim
+		strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_values.go b/vendor/github.com/hashicorp/terraform/configs/parser_values.go
new file mode 100644
index 0000000000..b7f1c1c5de
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/parser_values.go
@@ -0,0 +1,43 @@
+package configs
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// LoadValuesFile reads the file at the given path and parses it as a "values
+// file", which is an HCL config file whose top-level attributes are treated
+// as arbitrary key/value pairs.
+//
+// If the file cannot be read -- for example, if it does not exist -- then
+// a nil map will be returned along with error diagnostics. Callers may wish
+// to disregard the returned diagnostics in this case and instead generate
+// their own error message(s) with additional context.
+//
+// If the returned diagnostics has errors when a non-nil map is returned
+// then the map may be incomplete but should be valid enough for careful
+// static analysis.
+//
+// This method wraps LoadHCLFile, and so it inherits the syntax selection
+// behaviors documented for that method.
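// Editor's illustrative sketch (not part of the vendored file): reading a
// tfvars-style values file through the method documented above, again via
// an in-memory afero filesystem.
package main

import (
	"fmt"

	"github.com/spf13/afero"

	"github.com/hashicorp/terraform/configs"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "terraform.tfvars", []byte("region = \"us-west-2\"\ninstance_count = 3\n"), 0644)

	p := configs.NewParser(fs)
	vals, diags := p.LoadValuesFile("terraform.tfvars")
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Printf("%#v\n", vals["region"]) // cty.StringVal("us-west-2")
}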
+func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) { + body, diags := p.LoadHCLFile(path) + if body == nil { + return nil, diags + } + + vals := make(map[string]cty.Value) + attrs, attrDiags := body.JustAttributes() + diags = append(diags, attrDiags...) + if attrs == nil { + return vals, diags + } + + for name, attr := range attrs { + val, valDiags := attr.Expr.Value(nil) + diags = append(diags, valDiags...) + vals[name] = val + } + + return vals, diags +} diff --git a/vendor/github.com/hashicorp/terraform/configs/provider.go b/vendor/github.com/hashicorp/terraform/configs/provider.go new file mode 100644 index 0000000000..d01d5cf2e5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provider.go @@ -0,0 +1,144 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/terraform/addrs" +) + +// Provider represents a "provider" block in a module or file. A provider +// block is a provider configuration, and there can be zero or more +// configurations for each actual provider. +type Provider struct { + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if no alias set + + Version VersionConstraint + + Config hcl.Body + + DeclRange hcl.Range +} + +func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { + content, config, diags := block.Body.PartialContent(providerBlockSchema) + + provider := &Provider{ + Name: block.Labels[0], + NameRange: block.LabelRanges[0], + Config: config, + DeclRange: block.DefRange, + } + + if attr, exists := content.Attributes["alias"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) + diags = append(diags, valDiags...) + provider.AliasRange = attr.Expr.Range().Ptr() + + if !hclsyntax.ValidIdentifier(provider.Alias) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration alias", + Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), + }) + } + } + + if attr, exists := content.Attributes["version"]; exists { + var versionDiags hcl.Diagnostics + provider.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) + } + + // Reserved attribute names + for _, name := range []string{"count", "depends_on", "for_each", "source"} { + if attr, exists := content.Attributes[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in provider block", + Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), + Subject: &attr.NameRange, + }) + } + } + + // Reserved block types (all of them) + for _, block := range content.Blocks { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provider block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + + return provider, diags +} + +// Addr returns the address of the receiving provider configuration, relative +// to its containing module. 
+func (p *Provider) Addr() addrs.ProviderConfig { + return addrs.ProviderConfig{ + Type: p.Name, + Alias: p.Alias, + } +} + +func (p *Provider) moduleUniqueKey() string { + if p.Alias != "" { + return fmt.Sprintf("%s.%s", p.Name, p.Alias) + } + return p.Name +} + +// ProviderRequirement represents a declaration of a dependency on a particular +// provider version without actually configuring that provider. This is used in +// child modules that expect a provider to be passed in from their parent. +type ProviderRequirement struct { + Name string + Requirement VersionConstraint +} + +func decodeRequiredProvidersBlock(block *hcl.Block) ([]*ProviderRequirement, hcl.Diagnostics) { + attrs, diags := block.Body.JustAttributes() + var reqs []*ProviderRequirement + for name, attr := range attrs { + req, reqDiags := decodeVersionConstraint(attr) + diags = append(diags, reqDiags...) + if !diags.HasErrors() { + reqs = append(reqs, &ProviderRequirement{ + Name: name, + Requirement: req, + }) + } + } + return reqs, diags +} + +var providerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "alias", + }, + { + Name: "version", + }, + + // Attribute names reserved for future expansion. + {Name: "count"}, + {Name: "depends_on"}, + {Name: "for_each"}, + {Name: "source"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + // _All_ of these are reserved for future expansion. + {Type: "lifecycle"}, + {Type: "locals"}, + }, +} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioner.go b/vendor/github.com/hashicorp/terraform/configs/provisioner.go new file mode 100644 index 0000000000..b031dd0b0a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provisioner.go @@ -0,0 +1,150 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" +) + +// Provisioner represents a "provisioner" block when used within a +// "resource" block in a module or file. +type Provisioner struct { + Type string + Config hcl.Body + Connection *Connection + When ProvisionerWhen + OnFailure ProvisionerOnFailure + + DeclRange hcl.Range + TypeRange hcl.Range +} + +func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { + pv := &Provisioner{ + Type: block.Labels[0], + TypeRange: block.LabelRanges[0], + DeclRange: block.DefRange, + When: ProvisionerWhenCreate, + OnFailure: ProvisionerOnFailureFail, + } + + content, config, diags := block.Body.PartialContent(provisionerBlockSchema) + pv.Config = config + + if attr, exists := content.Attributes["when"]; exists { + expr, shimDiags := shimTraversalInString(attr.Expr, true) + diags = append(diags, shimDiags...) + + switch hcl.ExprAsKeyword(expr) { + case "create": + pv.When = ProvisionerWhenCreate + case "destroy": + pv.When = ProvisionerWhenDestroy + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"when\" keyword", + Detail: "The \"when\" argument requires one of the following keywords: create or destroy.", + Subject: expr.Range().Ptr(), + }) + } + } + + if attr, exists := content.Attributes["on_failure"]; exists { + expr, shimDiags := shimTraversalInString(attr.Expr, true) + diags = append(diags, shimDiags...) 
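// Editor's illustrative sketch (not part of the vendored file):
// hcl.ExprAsKeyword, used by the switch just below, recognizes a bare
// identifier expression and returns it as a string.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, _ := hclsyntax.ParseExpression([]byte("continue"), "example.tf", hcl.Pos{Line: 1, Column: 1})
	fmt.Println(hcl.ExprAsKeyword(expr)) // "continue"

	expr, _ = hclsyntax.ParseExpression([]byte(`"continue"`), "example.tf", hcl.Pos{Line: 1, Column: 1})
	fmt.Println(hcl.ExprAsKeyword(expr) == "") // true: a quoted string is not a keyword
}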
+ + switch hcl.ExprAsKeyword(expr) { + case "continue": + pv.OnFailure = ProvisionerOnFailureContinue + case "fail": + pv.OnFailure = ProvisionerOnFailureFail + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"on_failure\" keyword", + Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.", + Subject: attr.Expr.Range().Ptr(), + }) + } + } + + var seenConnection *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + + case "connection": + if seenConnection != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate connection block", + Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenConnection = block + + //conn, connDiags := decodeConnectionBlock(block) + //diags = append(diags, connDiags...) + pv.Connection = &Connection{ + Config: block.Body, + DeclRange: block.DefRange, + } + + default: + // Any other block types are ones we've reserved for future use, + // so they get a generic message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provisioner block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return pv, diags +} + +// Connection represents a "connection" block when used within either a +// "resource" or "provisioner" block in a module or file. +type Connection struct { + Config hcl.Body + + DeclRange hcl.Range +} + +// ProvisionerWhen is an enum for valid values for when to run provisioners. +type ProvisionerWhen int + +//go:generate stringer -type ProvisionerWhen + +const ( + ProvisionerWhenInvalid ProvisionerWhen = iota + ProvisionerWhenCreate + ProvisionerWhenDestroy +) + +// ProvisionerOnFailure is an enum for valid values for on_failure options +// for provisioners. +type ProvisionerOnFailure int + +//go:generate stringer -type ProvisionerOnFailure + +const ( + ProvisionerOnFailureInvalid ProvisionerOnFailure = iota + ProvisionerOnFailureContinue + ProvisionerOnFailureFail +) + +var provisionerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "when"}, + {Name: "on_failure"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "connection"}, + {Type: "lifecycle"}, // reserved for future use + }, +} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go new file mode 100644 index 0000000000..8704b0861c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT. 
+ +package configs + +import "strconv" + +const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail" + +var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79} + +func (i ProvisionerOnFailure) String() string { + if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) { + return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go new file mode 100644 index 0000000000..cbecb20ae1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT. + +package configs + +import "strconv" + +const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy" + +var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65} + +func (i ProvisionerWhen) String() string { + if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) { + return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/configs/resource.go b/vendor/github.com/hashicorp/terraform/configs/resource.go new file mode 100644 index 0000000000..de1a3434a4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/resource.go @@ -0,0 +1,486 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/terraform/addrs" +) + +// Resource represents a "resource" or "data" block in a module or file. +type Resource struct { + Mode addrs.ResourceMode + Name string + Type string + Config hcl.Body + Count hcl.Expression + ForEach hcl.Expression + + ProviderConfigRef *ProviderConfigRef + + DependsOn []hcl.Traversal + + // Managed is populated only for Mode = addrs.ManagedResourceMode, + // containing the additional fields that apply to managed resources. + // For all other resource modes, this field is nil. + Managed *ManagedResource + + DeclRange hcl.Range + TypeRange hcl.Range +} + +// ManagedResource represents a "resource" block in a module or file. +type ManagedResource struct { + Connection *Connection + Provisioners []*Provisioner + + CreateBeforeDestroy bool + PreventDestroy bool + IgnoreChanges []hcl.Traversal + IgnoreAllChanges bool + + CreateBeforeDestroySet bool + PreventDestroySet bool +} + +func (r *Resource) moduleUniqueKey() string { + return r.Addr().String() +} + +// Addr returns a resource address for the receiver that is relative to the +// resource's containing module. +func (r *Resource) Addr() addrs.Resource { + return addrs.Resource{ + Mode: r.Mode, + Type: r.Type, + Name: r.Name, + } +} + +// ProviderConfigAddr returns the address for the provider configuration +// that should be used for this resource. This function implements the +// default behavior of extracting the type from the resource type name if +// an explicit "provider" argument was not provided. 
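// Editor's illustrative sketch (not part of the vendored file): the default
// behavior described above, where the provider type is implied by the
// prefix of the resource type name.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	r := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "aws_instance",
		Name: "web",
	}
	// With no explicit "provider" argument, the implied provider
	// configuration comes from the "aws" prefix of "aws_instance".
	fmt.Println(r.DefaultProviderConfig().Type) // aws
}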
+func (r *Resource) ProviderConfigAddr() addrs.ProviderConfig { + if r.ProviderConfigRef == nil { + return r.Addr().DefaultProviderConfig() + } + + return addrs.ProviderConfig{ + Type: r.ProviderConfigRef.Name, + Alias: r.ProviderConfigRef.Alias, + } +} + +func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { + r := &Resource{ + Mode: addrs.ManagedResourceMode, + Type: block.Labels[0], + Name: block.Labels[1], + DeclRange: block.DefRange, + TypeRange: block.LabelRanges[0], + Managed: &ManagedResource{}, + } + + content, remain, diags := block.Body.PartialContent(resourceBlockSchema) + r.Config = remain + + if !hclsyntax.ValidIdentifier(r.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + if !hclsyntax.ValidIdentifier(r.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[1], + }) + } + + if attr, exists := content.Attributes["count"]; exists { + r.Count = attr.Expr + } + + if attr, exists := content.Attributes["for_each"]; exists { + r.ForEach = attr.Expr + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in resource block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["provider"]; exists { + var providerDiags hcl.Diagnostics + r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") + diags = append(diags, providerDiags...) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + r.DependsOn = append(r.DependsOn, deps...) + } + + var seenLifecycle *hcl.Block + var seenConnection *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + case "lifecycle": + if seenLifecycle != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate lifecycle block", + Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenLifecycle = block + + lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) + diags = append(diags, lcDiags...) + + if attr, exists := lcContent.Attributes["create_before_destroy"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy) + diags = append(diags, valDiags...) + r.Managed.CreateBeforeDestroySet = true + } + + if attr, exists := lcContent.Attributes["prevent_destroy"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy) + diags = append(diags, valDiags...) + r.Managed.PreventDestroySet = true + } + + if attr, exists := lcContent.Attributes["ignore_changes"]; exists { + + // ignore_changes can either be a list of relative traversals + // or it can be just the keyword "all" to ignore changes to this + // resource entirely. 
+ // ignore_changes = [ami, instance_type] + // ignore_changes = all + // We also allow two legacy forms for compatibility with earlier + // versions: + // ignore_changes = ["ami", "instance_type"] + // ignore_changes = ["*"] + + kw := hcl.ExprAsKeyword(attr.Expr) + + switch { + case kw == "all": + r.Managed.IgnoreAllChanges = true + default: + exprs, listDiags := hcl.ExprList(attr.Expr) + diags = append(diags, listDiags...) + + var ignoreAllRange hcl.Range + + for _, expr := range exprs { + + // our expr might be the literal string "*", which + // we accept as a deprecated way of saying "all". + if shimIsIgnoreChangesStar(expr) { + r.Managed.IgnoreAllChanges = true + ignoreAllRange = expr.Range() + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Deprecated ignore_changes wildcard", + Detail: "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.", + Subject: attr.Expr.Range().Ptr(), + }) + continue + } + + expr, shimDiags := shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.RelTraversalForExpr(expr) + diags = append(diags, travDiags...) + if len(traversal) != 0 { + r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal) + } + } + + if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid ignore_changes ruleset", + Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.", + Subject: &ignoreAllRange, + Context: attr.Expr.Range().Ptr(), + }) + } + + } + + } + + case "connection": + if seenConnection != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate connection block", + Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenConnection = block + + r.Managed.Connection = &Connection{ + Config: block.Body, + DeclRange: block.DefRange, + } + + case "provisioner": + pv, pvDiags := decodeProvisionerBlock(block) + diags = append(diags, pvDiags...) + if pv != nil { + r.Managed.Provisioners = append(r.Managed.Provisioners, pv) + } + + default: + // Any other block types are ones we've reserved for future use, + // so they get a generic message. 
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Reserved block type name in resource block",
+				Detail:   fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
+				Subject:  &block.TypeRange,
+			})
+		}
+	}
+
+	return r, diags
+}
+
+func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) {
+	r := &Resource{
+		Mode:      addrs.DataResourceMode,
+		Type:      block.Labels[0],
+		Name:      block.Labels[1],
+		DeclRange: block.DefRange,
+		TypeRange: block.LabelRanges[0],
+	}
+
+	content, remain, diags := block.Body.PartialContent(dataBlockSchema)
+	r.Config = remain
+
+	if !hclsyntax.ValidIdentifier(r.Type) {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid data source name",
+			Detail:   badIdentifierDetail,
+			Subject:  &block.LabelRanges[0],
+		})
+	}
+	if !hclsyntax.ValidIdentifier(r.Name) {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid data resource name",
+			Detail:   badIdentifierDetail,
+			Subject:  &block.LabelRanges[1],
+		})
+	}
+
+	if attr, exists := content.Attributes["count"]; exists {
+		r.Count = attr.Expr
+	}
+
+	if attr, exists := content.Attributes["for_each"]; exists {
+		r.ForEach = attr.Expr
+		// We currently parse this, but don't yet do anything with it.
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Reserved argument name in data block",
+			Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
+			Subject:  &attr.NameRange,
+		})
+	}
+
+	if attr, exists := content.Attributes["provider"]; exists {
+		var providerDiags hcl.Diagnostics
+		r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider")
+		diags = append(diags, providerDiags...)
+	}
+
+	if attr, exists := content.Attributes["depends_on"]; exists {
+		deps, depsDiags := decodeDependsOn(attr)
+		diags = append(diags, depsDiags...)
+		r.DependsOn = append(r.DependsOn, deps...)
+	}
+
+	for _, block := range content.Blocks {
+		// All of the block types we accept are just reserved for future use,
+		// but some get a specialized error message.
+		switch block.Type {
+		case "lifecycle":
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Unsupported lifecycle block",
+				Detail:   "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.",
+				Subject:  &block.DefRange,
+			})
+		default:
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Reserved block type name in data block",
+				Detail:   fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
+				Subject:  &block.TypeRange,
+			})
+		}
+	}
+
+	return r, diags
+}
+
+type ProviderConfigRef struct {
+	Name       string
+	NameRange  hcl.Range
+	Alias      string
+	AliasRange *hcl.Range // nil if alias not set
+}
+
+func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	var shimDiags hcl.Diagnostics
+	expr, shimDiags = shimTraversalInString(expr, false)
+	diags = append(diags, shimDiags...)
+
+	traversal, travDiags := hcl.AbsTraversalForExpr(expr)
+
+	// AbsTraversalForExpr produces only generic errors, so we'll discard
+	// the errors given and produce our own with extra context. If we didn't
+	// get any errors then we might still have warnings, though.
+	if !travDiags.HasErrors() {
+		diags = append(diags, travDiags...)
+ } + + if len(traversal) < 1 || len(traversal) > 2 { + // A provider reference was given as a string literal in the legacy + // configuration language and there are lots of examples out there + // showing that usage, so we'll sniff for that situation here and + // produce a specialized error message for it to help users find + // the new correct form. + if exprIsNativeQuotedString(expr) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: "A provider configuration reference must not be given in quotes.", + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + ret := &ProviderConfigRef{ + Name: traversal.RootName(), + NameRange: traversal[0].SourceRange(), + } + + if len(traversal) > 1 { + aliasStep, ok := traversal[1].(hcl.TraverseAttr) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", + Subject: traversal[1].SourceRange().Ptr(), + }) + return ret, diags + } + + ret.Alias = aliasStep.Name + ret.AliasRange = aliasStep.SourceRange().Ptr() + } + + return ret, diags +} + +// Addr returns the provider config address corresponding to the receiving +// config reference. +// +// This is a trivial conversion, essentially just discarding the source +// location information and keeping just the addressing information. 
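+//
+// For example (an illustrative sketch, using the types above): a reference
+// written as `aws.west` decodes to a ProviderConfigRef with Name "aws" and
+// Alias "west", for which Addr returns
+//
+//     addrs.ProviderConfig{Type: "aws", Alias: "west"}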
+func (r *ProviderConfigRef) Addr() addrs.ProviderConfig {
+	return addrs.ProviderConfig{
+		Type:  r.Name,
+		Alias: r.Alias,
+	}
+}
+
+func (r *ProviderConfigRef) String() string {
+	if r == nil {
+		return "<nil>"
+	}
+	if r.Alias != "" {
+		return fmt.Sprintf("%s.%s", r.Name, r.Alias)
+	}
+	return r.Name
+}
+
+var commonResourceAttributes = []hcl.AttributeSchema{
+	{
+		Name: "count",
+	},
+	{
+		Name: "for_each",
+	},
+	{
+		Name: "provider",
+	},
+	{
+		Name: "depends_on",
+	},
+}
+
+var resourceBlockSchema = &hcl.BodySchema{
+	Attributes: commonResourceAttributes,
+	Blocks: []hcl.BlockHeaderSchema{
+		{Type: "locals"}, // reserved for future use
+		{Type: "lifecycle"},
+		{Type: "connection"},
+		{Type: "provisioner", LabelNames: []string{"type"}},
+	},
+}
+
+var dataBlockSchema = &hcl.BodySchema{
+	Attributes: commonResourceAttributes,
+	Blocks: []hcl.BlockHeaderSchema{
+		{Type: "lifecycle"}, // reserved for future use
+		{Type: "locals"},    // reserved for future use
+	},
+}
+
+var resourceLifecycleBlockSchema = &hcl.BodySchema{
+	Attributes: []hcl.AttributeSchema{
+		{
+			Name: "create_before_destroy",
+		},
+		{
+			Name: "prevent_destroy",
+		},
+		{
+			Name: "ignore_changes",
+		},
+	},
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/synth_body.go b/vendor/github.com/hashicorp/terraform/configs/synth_body.go
new file mode 100644
index 0000000000..3ae1bff6ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/synth_body.go
@@ -0,0 +1,118 @@
+package configs
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes
+// corresponding to the elements given in the values map.
+//
+// This is useful in situations where, for example, values provided on the
+// command line can override values given in configuration, using MergeBodies.
+//
+// The given filename is used in case any diagnostics are returned. Since
+// the created body is synthetic, it is likely that this will not be a "real"
+// filename. For example, if the body was constructed from a command line
+// argument, the filename could be a representation of that argument's name,
+// such as "-var=...".
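+//
+// A minimal usage sketch (configBody stands in for some previously-loaded
+// hcl.Body):
+//
+//     overrides := SynthBody("-var=region=us-west-2", map[string]cty.Value{
+//         "region": cty.StringVal("us-west-2"),
+//     })
+//     body := hcl.MergeBodies([]hcl.Body{configBody, overrides})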
+func SynthBody(filename string, values map[string]cty.Value) hcl.Body { + return synthBody{ + Filename: filename, + Values: values, + } +} + +type synthBody struct { + Filename string + Values map[string]cty.Value +} + +func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, remain, diags := b.PartialContent(schema) + remainS := remain.(synthBody) + for name := range remainS.Values { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported attribute", + Detail: fmt.Sprintf("An attribute named %q is not expected here.", name), + Subject: b.synthRange().Ptr(), + }) + } + return content, diags +} + +func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + content := &hcl.BodyContent{ + Attributes: make(hcl.Attributes), + MissingItemRange: b.synthRange(), + } + + remainValues := make(map[string]cty.Value) + for attrName, val := range b.Values { + remainValues[attrName] = val + } + + for _, attrS := range schema.Attributes { + delete(remainValues, attrS.Name) + val, defined := b.Values[attrS.Name] + if !defined { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Subject: b.synthRange().Ptr(), + }) + } + continue + } + content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val) + } + + // We just ignore blocks altogether, because this body type never has + // nested blocks. + + remain := synthBody{ + Filename: b.Filename, + Values: remainValues, + } + + return content, remain, diags +} + +func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + ret := make(hcl.Attributes) + for name, val := range b.Values { + ret[name] = b.synthAttribute(name, val) + } + return ret, nil +} + +func (b synthBody) MissingItemRange() hcl.Range { + return b.synthRange() +} + +func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute { + rng := b.synthRange() + return &hcl.Attribute{ + Name: name, + Expr: &hclsyntax.LiteralValueExpr{ + Val: val, + SrcRange: rng, + }, + NameRange: rng, + Range: rng, + } +} + +func (b synthBody) synthRange() hcl.Range { + return hcl.Range{ + Filename: b.Filename, + Start: hcl.Pos{Line: 1, Column: 1}, + End: hcl.Pos{Line: 1, Column: 1}, + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/util.go b/vendor/github.com/hashicorp/terraform/configs/util.go new file mode 100644 index 0000000000..5fbde43109 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/util.go @@ -0,0 +1,63 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +// exprIsNativeQuotedString determines whether the given expression looks like +// it's a quoted string in the HCL native syntax. +// +// This should be used sparingly only for situations where our legacy HCL +// decoding would've expected a keyword or reference in quotes but our new +// decoding expects the keyword or reference to be provided directly as +// an identifier-based expression. +func exprIsNativeQuotedString(expr hcl.Expression) bool { + _, ok := expr.(*hclsyntax.TemplateExpr) + return ok +} + +// schemaForOverrides takes a *hcl.BodySchema and produces a new one that is +// equivalent except that any required attributes are forced to not be required. 
+//
+// This is useful for dealing with "override" config files, which are allowed
+// to omit things that they don't wish to override from the main configuration.
+//
+// The returned schema may have some pointers in common with the given schema,
+// so neither the given schema nor the returned schema should be modified after
+// using this function in order to avoid confusion.
+//
+// Overrides are rarely used, so it's recommended to just create the override
+// schema on the fly only when it's needed, rather than storing it in a global
+// variable as we tend to do for a primary schema.
+func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema {
+	ret := &hcl.BodySchema{
+		Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)),
+		Blocks:     schema.Blocks,
+	}
+
+	for i, attrS := range schema.Attributes {
+		ret.Attributes[i] = attrS
+		ret.Attributes[i].Required = false
+	}
+
+	return ret
+}
+
+// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that
+// is equivalent except that it accepts an additional block type "dynamic" with
+// a single label, used to recognize usage of the HCL dynamic block extension.
+func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema {
+	ret := &hcl.BodySchema{
+		Attributes: schema.Attributes,
+		Blocks:     make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1),
+	}
+
+	copy(ret.Blocks, schema.Blocks)
+	ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{
+		Type:       "dynamic",
+		LabelNames: []string{"type"},
+	})
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go
new file mode 100644
index 0000000000..204efd1207
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go
@@ -0,0 +1,45 @@
+package configs
+
+// VariableTypeHint is an enumeration used for the Variable.TypeHint field,
+// which is an incompletely-specified type for the variable which is used
+// as a hint for whether a value provided in an ambiguous context (on the
+// command line or in an environment variable) should be taken literally as a
+// string or parsed as an HCL expression to produce a data structure.
+//
+// The type hint is applied to runtime values as well, but since it does not
+// accurately describe a precise type it is not fully-sufficient to infer
+// the dynamic type of a value passed through a variable.
+//
+// These hints use inaccurate terminology for historical reasons. Full details
+// are in the documentation for each constant in this enumeration, but in
+// summary:
+//
+//     TypeHintString requires a primitive type
+//     TypeHintList requires a type that could be converted to a tuple
+//     TypeHintMap requires a type that could be converted to an object
+type VariableTypeHint rune
+
+//go:generate stringer -type VariableTypeHint
+
+// TypeHintNone indicates the absence of a type hint. Values specified in
+// ambiguous contexts will be treated as literal strings, as if TypeHintString
+// were selected, but no runtime value checks will be applied. This is a
+// reasonable type hint for a module that is never intended to be used at the
+// top-level of a configuration, since descendant modules never receive values
+// from ambiguous contexts.
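+//
+// As an illustration: with -var='tags={"env":"prod"}' on the command line,
+// TypeHintNone (like TypeHintString) leaves the value as the literal string
+// `{"env":"prod"}`, whereas TypeHintMap would parse that same text as an HCL
+// expression and produce an object value.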
+const TypeHintNone VariableTypeHint = 0

+// TypeHintString indicates that a value provided in an ambiguous context
+// should be treated as a literal string, and additionally requires that the
+// runtime value for the variable is of a primitive type (string, number, bool).
+const TypeHintString VariableTypeHint = 'S'
+
+// TypeHintList indicates that a value provided in an ambiguous context should
+// be treated as an HCL expression, and additionally requires that the
+// runtime value for the variable is of a tuple, list, or set type.
+const TypeHintList VariableTypeHint = 'L'
+
+// TypeHintMap indicates that a value provided in an ambiguous context should
+// be treated as an HCL expression, and additionally requires that the
+// runtime value for the variable is of an object or map type.
+const TypeHintMap VariableTypeHint = 'M'
diff --git a/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go
new file mode 100644
index 0000000000..4447cf5562
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go
@@ -0,0 +1,29 @@
+// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT.
+
+package configs
+
+import "strconv"
+
+const (
+	_VariableTypeHint_name_0 = "TypeHintNone"
+	_VariableTypeHint_name_1 = "TypeHintListTypeHintMap"
+	_VariableTypeHint_name_2 = "TypeHintString"
+)
+
+var (
+	_VariableTypeHint_index_1 = [...]uint8{0, 12, 23}
+)
+
+func (i VariableTypeHint) String() string {
+	switch {
+	case i == 0:
+		return _VariableTypeHint_name_0
+	case 76 <= i && i <= 77:
+		i -= 76
+		return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]]
+	case i == 83:
+		return _VariableTypeHint_name_2
+	default:
+		return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
new file mode 100644
index 0000000000..7aa19efc67
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
@@ -0,0 +1,64 @@
+package configs
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// VersionConstraint represents a version constraint on some resource
+// (e.g. Terraform Core, a provider, a module, ...) that carries with it
+// a source range so that a helpful diagnostic can be printed in the event
+// that a particular constraint does not match.
+type VersionConstraint struct {
+	Required  version.Constraints
+	DeclRange hcl.Range
+}
+
+func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) {
+	ret := VersionConstraint{
+		DeclRange: attr.Range,
+	}
+
+	val, diags := attr.Expr.Value(nil)
+	if diags.HasErrors() {
+		return ret, diags
+	}
+	var err error
+	val, err = convert.Convert(val, cty.String)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid version constraint",
+			Detail:   fmt.Sprintf("A string value is required for %s.", attr.Name),
+			Subject:  attr.Expr.Range().Ptr(),
+		})
+		return ret, diags
+	}
+
+	if val.IsNull() {
+		// A null version constraint is strange, but we'll just treat it
+		// like an empty constraint set.
+ return ret, diags + } + + constraintStr := val.AsString() + constraints, err := version.NewConstraint(constraintStr) + if err != nil { + // NewConstraint doesn't return user-friendly errors, so we'll just + // ignore the provided error and produce our own generic one. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: "This string does not use correct version constraint syntax.", // Not very actionable :( + Subject: attr.Expr.Range().Ptr(), + }) + return ret, diags + } + + ret.Required = constraints + return ret, diags +} diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go index b7eb10c335..77c67eff9f 100644 --- a/vendor/github.com/hashicorp/terraform/dag/dag.go +++ b/vendor/github.com/hashicorp/terraform/dag/dag.go @@ -5,6 +5,8 @@ import ( "sort" "strings" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/go-multierror" ) @@ -15,7 +17,7 @@ type AcyclicGraph struct { } // WalkFunc is the callback used for walking the graph. -type WalkFunc func(Vertex) error +type WalkFunc func(Vertex) tfdiags.Diagnostics // DepthWalkFunc is a walk function that also receives the current depth of the // walk as an argument @@ -161,9 +163,9 @@ func (g *AcyclicGraph) Cycles() [][]Vertex { } // Walk walks the graph, calling your callback as each node is visited. -// This will walk nodes in parallel if it can. Because the walk is done -// in parallel, the error returned will be a multierror. -func (g *AcyclicGraph) Walk(cb WalkFunc) error { +// This will walk nodes in parallel if it can. The resulting diagnostics +// contains problems from all graphs visited, in no particular order. +func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics { defer g.debug.BeginOperation(typeWalk, "").End("") w := &Walker{Callback: cb, Reverse: true} diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go index f03b10030b..1c926c2c28 100644 --- a/vendor/github.com/hashicorp/terraform/dag/walk.go +++ b/vendor/github.com/hashicorp/terraform/dag/walk.go @@ -2,12 +2,11 @@ package dag import ( "errors" - "fmt" "log" "sync" "time" - "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/tfdiags" ) // Walker is used to walk every vertex of a graph in parallel. @@ -54,10 +53,15 @@ type Walker struct { // if new vertices are added. wait sync.WaitGroup - // errMap contains the errors recorded so far for execution. Reading - // and writing should hold errLock. - errMap map[Vertex]error - errLock sync.Mutex + // diagsMap contains the diagnostics recorded so far for execution, + // and upstreamFailed contains all the vertices whose problems were + // caused by upstream failures, and thus whose diagnostics should be + // excluded from the final set. + // + // Readers and writers of either map must hold diagsLock. + diagsMap map[Vertex]tfdiags.Diagnostics + upstreamFailed map[Vertex]struct{} + diagsLock sync.Mutex } type walkerVertex struct { @@ -98,31 +102,30 @@ type walkerVertex struct { // user-returned error. var errWalkUpstream = errors.New("upstream dependency failed") -// Wait waits for the completion of the walk and returns any errors ( -// in the form of a multierror) that occurred. Update should be called -// to populate the walk with vertices and edges prior to calling this. +// Wait waits for the completion of the walk and returns diagnostics describing +// any problems that arose. 
Update should be called to populate the walk with +// vertices and edges prior to calling this. // // Wait will return as soon as all currently known vertices are complete. // If you plan on calling Update with more vertices in the future, you // should not call Wait until after this is done. -func (w *Walker) Wait() error { +func (w *Walker) Wait() tfdiags.Diagnostics { // Wait for completion w.wait.Wait() - // Grab the error lock - w.errLock.Lock() - defer w.errLock.Unlock() - - // Build the error - var result error - for v, err := range w.errMap { - if err != nil && err != errWalkUpstream { - result = multierror.Append(result, fmt.Errorf( - "%s: %s", VertexName(v), err)) + var diags tfdiags.Diagnostics + w.diagsLock.Lock() + for v, vDiags := range w.diagsMap { + if _, upstream := w.upstreamFailed[v]; upstream { + // Ignore diagnostics for nodes that had failed upstreams, since + // the downstream diagnostics are likely to be redundant. + continue } + diags = diags.Append(vDiags) } + w.diagsLock.Unlock() - return result + return diags } // Update updates the currently executing walk with the given graph. @@ -136,6 +139,7 @@ func (w *Walker) Wait() error { // Multiple Updates can be called in parallel. Update can be called at any // time during a walk. func (w *Walker) Update(g *AcyclicGraph) { + log.Print("[TRACE] dag/walk: updating graph") var v, e *Set if g != nil { v, e = g.vertices, g.edges @@ -381,25 +385,34 @@ func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { } // Run our callback or note that our upstream failed - var err error + var diags tfdiags.Diagnostics + var upstreamFailed bool if depsSuccess { - log.Printf("[TRACE] dag/walk: walking %q", VertexName(v)) - err = w.Callback(v) + log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v)) + diags = w.Callback(v) } else { - log.Printf("[TRACE] dag/walk: upstream errored, not walking %q", VertexName(v)) - err = errWalkUpstream + log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v)) + // This won't be displayed to the user because we'll set upstreamFailed, + // but we need to ensure there's at least one error in here so that + // the failures will cascade downstream. + diags = diags.Append(errors.New("upstream dependencies failed")) + upstreamFailed = true } - // Record the error - if err != nil { - w.errLock.Lock() - defer w.errLock.Unlock() - - if w.errMap == nil { - w.errMap = make(map[Vertex]error) - } - w.errMap[v] = err + // Record the result (we must do this after execution because we mustn't + // hold diagsLock while visiting a vertex.) + w.diagsLock.Lock() + if w.diagsMap == nil { + w.diagsMap = make(map[Vertex]tfdiags.Diagnostics) + } + w.diagsMap[v] = diags + if w.upstreamFailed == nil { + w.upstreamFailed = make(map[Vertex]struct{}) } + if upstreamFailed { + w.upstreamFailed[v] = struct{}{} + } + w.diagsLock.Unlock() } func (w *Walker) waitDeps( @@ -407,6 +420,7 @@ func (w *Walker) waitDeps( deps map[Vertex]<-chan struct{}, doneCh chan<- bool, cancelCh <-chan struct{}) { + // For each dependency given to us, wait for it to complete for dep, depCh := range deps { DepSatisfied: @@ -423,17 +437,17 @@ func (w *Walker) waitDeps( return case <-time.After(time.Second * 5): - log.Printf("[TRACE] dag/walk: vertex %q, waiting for: %q", + log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q", VertexName(v), VertexName(dep)) } } } // Dependencies satisfied! 
We need to check if any errored
-	w.errLock.Lock()
-	defer w.errLock.Unlock()
-	for dep, _ := range deps {
-		if w.errMap[dep] != nil {
+	w.diagsLock.Lock()
+	defer w.diagsLock.Unlock()
+	for dep := range deps {
+		if w.diagsMap[dep].HasErrors() {
 			// One of our dependencies failed, so return false
 			doneCh <- false
 			return
diff --git a/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go b/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go
new file mode 100644
index 0000000000..54899bc652
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go
@@ -0,0 +1,24 @@
+package didyoumean
+
+import (
+	"github.com/agext/levenshtein"
+)
+
+// NameSuggestion tries to find a name from the given slice of suggested names
+// that is close to the given name and returns it if found. If no suggestion
+// is close enough, returns the empty string.
+//
+// The suggestions are tried in order, so earlier suggestions take precedence
+// if the given string is similar to two or more suggestions.
+//
+// This function is intended to be used with a relatively-small number of
+// suggestions. It's not optimized for hundreds or thousands of them.
+func NameSuggestion(given string, suggestions []string) string {
+	for _, suggestion := range suggestions {
+		dist := levenshtein.Distance(given, suggestion, nil)
+		if dist < 3 { // threshold determined experimentally
+			return suggestion
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go b/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go
new file mode 100644
index 0000000000..82b5937bfe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go
@@ -0,0 +1,6 @@
+// Package plugin contains types and functions to help Terraform plugins
+// implement the plugin rpc interface.
+// The primary Provider type will be responsible for converting from the grpc
+// wire protocol to the types and methods known to the provider
+// implementations.
+package plugin
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go
new file mode 100644
index 0000000000..143e0f645a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go
@@ -0,0 +1,1311 @@
+package plugin
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/zclconf/go-cty/cty"
+	ctyconvert "github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/msgpack"
+	context "golang.org/x/net/context"
+
+	"github.com/hashicorp/terraform/config/hcl2shim"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/helper/schema"
+	proto "github.com/hashicorp/terraform/internal/tfplugin5"
+	"github.com/hashicorp/terraform/plugin/convert"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+const newExtraKey = "_new_extra_shim"
+
+// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a
+// proto.ProviderServer implementation. If the provided provider is not a
+// *schema.Provider, this will return nil.
+func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer {
+	sp, ok := p.(*schema.Provider)
+	if !ok {
+		return nil
+	}
+
+	return &GRPCProviderServer{
+		provider: sp,
+	}
+}
+
+// GRPCProviderServer handles the server, or plugin side of the rpc connection.
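+//
+// A rough wiring sketch (the Provider constructor here is hypothetical):
+//
+//     server := NewGRPCProviderServerShim(Provider())
+//     if server == nil {
+//         // not a *schema.Provider; fall back to the legacy plugin protocol
+//     }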
+type GRPCProviderServer struct { + provider *schema.Provider +} + +func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) { + // Here we are certain that the provider is being called through grpc, so + // make sure the feature flag for helper/schema is set + schema.SetProto5() + + resp := &proto.GetProviderSchema_Response{ + ResourceSchemas: make(map[string]*proto.Schema), + DataSourceSchemas: make(map[string]*proto.Schema), + } + + resp.Provider = &proto.Schema{ + Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlockForCore()), + } + + for typ, res := range s.provider.ResourcesMap { + resp.ResourceSchemas[typ] = &proto.Schema{ + Version: int64(res.SchemaVersion), + Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()), + } + } + + for typ, dat := range s.provider.DataSourcesMap { + resp.DataSourceSchemas[typ] = &proto.Schema{ + Version: int64(dat.SchemaVersion), + Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()), + } + } + + return resp, nil +} + +func (s *GRPCProviderServer) getProviderSchemaBlockForCore() *configschema.Block { + return schema.InternalMap(s.provider.Schema).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getResourceSchemaBlockForCore(name string) *configschema.Block { + res := s.provider.ResourcesMap[name] + return res.CoreConfigSchema() +} + +func (s *GRPCProviderServer) getDatasourceSchemaBlockForCore(name string) *configschema.Block { + dat := s.provider.DataSourcesMap[name] + return dat.CoreConfigSchema() +} + +func (s *GRPCProviderServer) getProviderSchemaBlockForShimming() *configschema.Block { + newSchema := map[string]*schema.Schema{} + for attr, s := range s.provider.Schema { + newSchema[attr] = schema.LegacySchema(s) + } + + return schema.InternalMap(newSchema).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getResourceSchemaBlockForShimming(name string) *configschema.Block { + res := s.provider.ResourcesMap[name] + return schema.LegacyResourceSchema(res).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getDatasourceSchemaBlockForShimming(name string) *configschema.Block { + dat := s.provider.DataSourcesMap[name] + return schema.LegacyResourceSchema(dat).CoreConfigSchema() +} + +func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) { + resp := &proto.PrepareProviderConfig_Response{} + + blockForCore := s.getProviderSchemaBlockForCore() + blockForShimming := s.getProviderSchemaBlockForShimming() + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. 
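+	// For instance (an illustrative schema, not from any real provider):
+	// given
+	//
+	//     "region": {Type: schema.TypeString, Optional: true, Default: "us-east-1"},
+	//
+	// a null "region" in the incoming config is rewritten below to
+	// cty.StringVal("us-east-1").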
+ configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := s.provider.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return resp, nil + } + + configVal, err = blockForShimming.CoerceValue(configVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. 
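+	// (For example, a null element inside a list-like attribute, as in
+	// ["a", null], would be reported as an error here rather than being
+	// passed through to the SDK.)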
+ if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, blockForShimming) + + warns, errs := s.provider.Validate(config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + preparedConfigMP, err := msgpack.Marshal(configVal, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP} + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) { + resp := &proto.ValidateResourceTypeConfig_Response{} + + blockForCore := s.getResourceSchemaBlockForCore(req.TypeName) + blockForShimming := s.getResourceSchemaBlockForShimming(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, blockForShimming) + + warns, errs := s.provider.ValidateResource(req.TypeName, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) { + resp := &proto.ValidateDataSourceConfig_Response{} + + blockForCore := s.getDatasourceSchemaBlockForCore(req.TypeName) + blockForShimming := s.getDatasourceSchemaBlockForShimming(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, blockForShimming) + + warns, errs := s.provider.ValidateDataSource(req.TypeName, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + return resp, nil +} + +func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) { + resp := &proto.UpgradeResourceState_Response{} + + res := s.provider.ResourcesMap[req.TypeName] + blockForCore := s.getResourceSchemaBlockForCore(req.TypeName) + blockForShimming := s.getResourceSchemaBlockForShimming(req.TypeName) + + version := int(req.Version) + + var jsonMap map[string]interface{} + var err error + + // if there's a JSON state, we need to decode it. + if len(req.RawState.Json) > 0 { + err = json.Unmarshal(req.RawState.Json, &jsonMap) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + // We first need to upgrade a flatmap state if it exists. + // There should never be both a JSON and Flatmap state in the request. 
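+	// The overall flow is: flatmap state -> legacy MigrateState (if needed)
+	// -> JSON state -> StateUpgraders (if any) -> final JSON map. For
+	// example, a state recorded at schema version 0 for a resource whose
+	// first StateUpgrader starts at version 2 is first migrated via
+	// MigrateState up to version 2, then stepped through each upgrader in
+	// order.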
+	if req.RawState.Flatmap != nil {
+		jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res)
+		if err != nil {
+			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+			return resp, nil
+		}
+	}
+
+	// complete the upgrade of the JSON state
+	jsonMap, err = s.upgradeJSONState(version, jsonMap, res)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// now we need to turn the state into the default json representation, so
+	// that it can be re-decoded using the actual schema.
+	val, err := schema.JSONMapToStateValue(jsonMap, blockForShimming)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// encode the final state to the expected msgpack format
+	newStateMP, err := msgpack.Marshal(val, blockForCore.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP}
+	return resp, nil
+}
+
+// upgradeFlatmapState takes a legacy flatmap state, upgrades it using
+// MigrateState if necessary, and converts it to the new JSON state format
+// decoded as a map[string]interface{}.
+// upgradeFlatmapState returns the json map along with the corresponding schema
+// version.
+func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) {
+	// this will be the version we've upgraded to, defaulting to the given
+	// version in case no migration was called.
+	upgradedVersion := version
+
+	// first determine if we need to call the legacy MigrateState func
+	requiresMigrate := version < res.SchemaVersion
+
+	schemaType := schema.LegacyResourceSchema(res).CoreConfigSchema().ImpliedType()
+
+	// if there are any StateUpgraders, then we need to only compare
+	// against the first version there
+	if len(res.StateUpgraders) > 0 {
+		requiresMigrate = version < res.StateUpgraders[0].Version
+	}
+
+	if requiresMigrate {
+		if res.MigrateState == nil {
+			return nil, 0, errors.New("cannot upgrade state, missing MigrateState function")
+		}
+
+		is := &terraform.InstanceState{
+			ID:         m["id"],
+			Attributes: m,
+			Meta: map[string]interface{}{
+				"schema_version": strconv.Itoa(version),
+			},
+		}
+
+		is, err := res.MigrateState(version, is, s.provider.Meta())
+		if err != nil {
+			return nil, 0, err
+		}
+
+		// re-assign the map in case there was a copy made, making sure to keep
+		// the ID
+		m = is.Attributes
+		m["id"] = is.ID
+
+		// if there are further upgraders, then we've only updated that far
+		if len(res.StateUpgraders) > 0 {
+			schemaType = res.StateUpgraders[0].Type
+			upgradedVersion = res.StateUpgraders[0].Version
+		}
+	} else {
+		// the schema version may be newer than what the MigrateState functions
+		// handled and older than the current, but still stored in the flatmap
+		// form. If that's the case, we need to find the correct schema type to
+		// convert the state.
+		for _, upgrader := range res.StateUpgraders {
+			if upgrader.Version == version {
+				schemaType = upgrader.Type
+				break
+			}
+		}
+	}
+
+	// now we know the state is up to the latest version that handled the
+	// flatmap format state. Now we can upgrade the format and continue from
+	// there.
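+	// As a sketch of this conversion (example data only): a flatmap such as
+	//
+	//     {"id": "i-abc123", "tags.%": "1", "tags.env": "prod"}
+	//
+	// becomes one cty object value with an "id" string and a "tags" map,
+	// which StateValueToJSONMap then renders as a plain JSON-style map.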
+ newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType) + if err != nil { + return nil, 0, err + } + + jsonMap, err := schema.StateValueToJSONMap(newConfigVal, schemaType) + return jsonMap, upgradedVersion, err +} + +func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) { + var err error + + for _, upgrader := range res.StateUpgraders { + if version != upgrader.Version { + continue + } + + m, err = upgrader.Upgrade(m, s.provider.Meta()) + if err != nil { + return nil, err + } + version++ + } + + return m, nil +} + +func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) { + resp := &proto.Stop_Response{} + + err := s.provider.Stop() + if err != nil { + resp.Error = err.Error() + } + + return resp, nil +} + +func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) { + resp := &proto.Configure_Response{} + + blockForCore := s.getProviderSchemaBlockForCore() + blockForShimming := s.getProviderSchemaBlockForShimming() + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + s.provider.TerraformVersion = req.TerraformVersion + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, blockForShimming) + err = s.provider.Configure(config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + + return resp, nil +} + +func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { + resp := &proto.ReadResource_Response{} + + res := s.provider.ResourcesMap[req.TypeName] + blockForCore := s.getResourceSchemaBlockForCore(req.TypeName) + blockForShimming := s.getResourceSchemaBlockForShimming(req.TypeName) + + stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + instanceState, err := res.ShimInstanceStateFromValue(stateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + if newInstanceState == nil || newInstanceState.ID == "" { + // The old provider API used an empty id to signal that the remote + // object appears to have been deleted, but our new protocol expects + // to see a null value (in the cty sense) in that case. 
+ newStateMP, err := msgpack.Marshal(cty.NullVal(blockForCore.ImpliedType()), blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil + } + + // helper/schema should always copy the ID over, but do it again just to be safe + newInstanceState.Attributes["id"] = newInstanceState.ID + + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, blockForShimming.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, stateVal, true) + newStateVal = copyTimeoutValues(newStateVal, stateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + + return resp, nil +} + +func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { + resp := &proto.PlanResourceChange_Response{} + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. + resp.LegacyTypeSystem = true + + res := s.provider.ResourcesMap[req.TypeName] + blockForCore := s.getResourceSchemaBlockForCore(req.TypeName) + blockForShimming := s.getResourceSchemaBlockForShimming(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + create := priorStateVal.IsNull() + + proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // We don't usually plan destroys, but this can return early in any case. + if proposedNewStateVal.IsNull() { + resp.PlannedState = req.ProposedNewState + return resp, nil + } + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorPrivate := make(map[string]interface{}) + if len(req.PriorPrivate) > 0 { + if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + priorState.Meta = priorPrivate + + // Ensure there are no nulls that will cause helper/schema to panic. 
+	if err := validateConfigNulls(proposedNewStateVal, nil); err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// turn the proposed state into a legacy configuration
+	cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, blockForShimming)
+
+	diff, err := s.provider.SimpleDiff(info, priorState, cfg)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// if this is a new instance, we need to make sure ID is going to be computed
+	if create {
+		if diff == nil {
+			diff = terraform.NewInstanceDiff()
+		}
+
+		diff.Attributes["id"] = &terraform.ResourceAttrDiff{
+			NewComputed: true,
+		}
+	}
+
+	if diff == nil || len(diff.Attributes) == 0 {
+		// schema.Provider.Diff returns nil if it ends up making a diff with no
+		// changes, but our new interface wants us to return an actual change
+		// description that _shows_ there are no changes. This is always the
+		// prior state, because we force a diff above if this is a new instance.
+		resp.PlannedState = req.PriorState
+		return resp, nil
+	}
+
+	if priorState == nil {
+		priorState = &terraform.InstanceState{}
+	}
+
+	// now we need to apply the diff to the prior state, so get the planned state
+	plannedAttrs, err := diff.Apply(priorState.Attributes, blockForShimming)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, blockForShimming.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal, err = blockForShimming.CoerceValue(plannedStateVal)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, true)
+
+	plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal)
+
+	// The old SDK code has some imprecisions that cause it to sometimes
+	// generate differences that the SDK itself does not consider significant
+	// but Terraform Core would. To avoid producing weird do-nothing diffs
+	// in that case, we'll check if the provider has produced something we
+	// think is "equivalent" to the prior state and just return the prior state
+	// itself if so, thus ensuring that Terraform Core will treat this as
+	// a no-op. See the docs for ValuesSDKEquivalent for some caveats on its
+	// accuracy.
+	forceNoChanges := false
+	if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) {
+		plannedStateVal = priorStateVal
+		forceNoChanges = true
+	}
+
+	// if this was creating the resource, we need to set any remaining computed
+	// fields
+	if create {
+		plannedStateVal = SetUnknowns(plannedStateVal, blockForShimming)
+	}
+
+	plannedMP, err := msgpack.Marshal(plannedStateVal, blockForCore.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+	resp.PlannedState = &proto.DynamicValue{
+		Msgpack: plannedMP,
+	}
+
+	// Now we need to store any NewExtra values, which are where any actual
+	// StateFunc modified config fields are hidden.
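+	// For example, an attribute whose StateFunc hashes its input plans only
+	// the hashed value, while the user's original (pre-hash) value travels
+	// in NewExtra so it is still available at apply time.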
+ privateMap := diff.Meta + if privateMap == nil { + privateMap = map[string]interface{}{} + } + + newExtra := map[string]interface{}{} + + for k, v := range diff.Attributes { + if v.NewExtra != nil { + newExtra[k] = v.NewExtra + } + } + privateMap[newExtraKey] = newExtra + + // the Meta field gets encoded into PlannedPrivate + plannedPrivate, err := json.Marshal(privateMap) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedPrivate = plannedPrivate + + // collect the attributes that require instance replacement, and convert + // them to cty.Paths. + var requiresNew []string + if !forceNoChanges { + for attr, d := range diff.Attributes { + if d.RequiresNew { + requiresNew = append(requiresNew, attr) + } + } + } + + // If anything requires a new resource already, or the "id" field indicates + // that we will be creating a new resource, then we need to add that to + // RequiresReplace so that core can tell if the instance is being replaced + // even if changes are being suppressed via "ignore_changes". + id := plannedStateVal.GetAttr("id") + if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() { + requiresNew = append(requiresNew, "id") + } + + requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, blockForShimming.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // convert these to the protocol structures + for _, p := range requiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p)) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) { + resp := &proto.ApplyResourceChange_Response{ + // Start with the existing state as a fallback + NewState: req.PriorState, + } + + res := s.provider.ResourcesMap[req.TypeName] + blockForCore := s.getResourceSchemaBlockForCore(req.TypeName) + blockForShimming := s.getResourceSchemaBlockForShimming(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + private := make(map[string]interface{}) + if len(req.PlannedPrivate) > 0 { + if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + var diff *terraform.InstanceDiff + destroy := false + + // a null state means we are destroying the instance + if plannedStateVal.IsNull() { + destroy = true + diff = &terraform.InstanceDiff{ + Attributes: make(map[string]*terraform.ResourceAttrDiff), + Meta: make(map[string]interface{}), + Destroy: true, + } + } else { + diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res)) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + if diff == 
nil {
+		diff = &terraform.InstanceDiff{
+			Attributes: make(map[string]*terraform.ResourceAttrDiff),
+			Meta:       make(map[string]interface{}),
+		}
+	}
+
+	// add NewExtra fields that may have been stored in the private data
+	if newExtra := private[newExtraKey]; newExtra != nil {
+		for k, v := range newExtra.(map[string]interface{}) {
+			d := diff.Attributes[k]
+
+			if d == nil {
+				d = &terraform.ResourceAttrDiff{}
+			}
+
+			d.NewExtra = v
+			diff.Attributes[k] = d
+		}
+	}
+
+	if private != nil {
+		diff.Meta = private
+	}
+
+	for k, d := range diff.Attributes {
+		// We need to turn off any RequiresNew. There could be attributes
+		// without changes in here inserted by helper/schema, but if they have
+		// RequiresNew then the state will be dropped from the ResourceData.
+		d.RequiresNew = false
+
+		// Drop any "removed" attributes that don't actually exist in the
+		// prior state, or helper/schema will confuse itself
+		if d.NewRemoved {
+			if _, ok := priorState.Attributes[k]; !ok {
+				delete(diff.Attributes, k)
+			}
+		}
+	}
+
+	newInstanceState, err := s.provider.Apply(info, priorState, diff)
+	// we record the error here, but continue processing any returned state.
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+	}
+	newStateVal := cty.NullVal(blockForShimming.ImpliedType())
+
+	// Always return a null value for destroy.
+	// While this is usually indicated by a nil state, check for missing ID or
+	// attributes in the case of a provider failure.
+	if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" {
+		newStateMP, err := msgpack.Marshal(newStateVal, blockForCore.ImpliedType())
+		if err != nil {
+			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+			return resp, nil
+		}
+		resp.NewState = &proto.DynamicValue{
+			Msgpack: newStateMP,
+		}
+		return resp, nil
+	}
+
+	// We keep the null val if we destroyed the resource, otherwise build the
+	// entire object, even if the new state was nil.
+	newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, blockForShimming.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	newStateVal = normalizeNullValues(newStateVal, plannedStateVal, false)
+
+	newStateVal = copyTimeoutValues(newStateVal, plannedStateVal)
+
+	newStateMP, err := msgpack.Marshal(newStateVal, blockForCore.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+	resp.NewState = &proto.DynamicValue{
+		Msgpack: newStateMP,
+	}
+
+	meta, err := json.Marshal(newInstanceState.Meta)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+	resp.Private = meta
+
+	// This is a signal to Terraform Core that we're doing the best we can to
+	// shim the legacy type system of the SDK onto the Terraform type system
+	// but we need it to cut us some slack. This setting should not be taken
+	// forward to any new SDK implementations, since setting it prevents us
+	// from catching certain classes of provider bug that can lead to
+	// confusing downstream errors.
+ resp.LegacyTypeSystem = true + + return resp, nil +} + +func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) { + resp := &proto.ImportResourceState_Response{} + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + newInstanceStates, err := s.provider.ImportState(info, req.Id) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + for _, is := range newInstanceStates { + // copy the ID again just to be sure it wasn't missed + is.Attributes["id"] = is.ID + + resourceType := is.Ephemeral.Type + if resourceType == "" { + resourceType = req.TypeName + } + + blockForCore := s.getResourceSchemaBlockForCore(resourceType) + blockForShimming := s.getResourceSchemaBlockForShimming(resourceType) + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, blockForShimming.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateMP, err := msgpack.Marshal(newStateVal, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + meta, err := json.Marshal(is.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + importedResource := &proto.ImportResourceState_ImportedResource{ + TypeName: resourceType, + State: &proto.DynamicValue{ + Msgpack: newStateMP, + }, + Private: meta, + } + + resp.ImportedResources = append(resp.ImportedResources, importedResource) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) { + resp := &proto.ReadDataSource_Response{} + + blockForCore := s.getDatasourceSchemaBlockForCore(req.TypeName) + blockForShimming := s.getDatasourceSchemaBlockForShimming(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + // Ensure there are no nulls that will cause helper/schema to panic. 
+ if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, blockForShimming) + + // we still need to build the diff separately with the Read method to match + // the old behavior + diff, err := s.provider.ReadDataDiff(info, config) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // now we can get the new complete data source + newInstanceState, err := s.provider.ReadDataApply(info, diff) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, blockForShimming.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = copyTimeoutValues(newStateVal, configVal) + + newStateMP, err := msgpack.Marshal(newStateVal, blockForCore.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.State = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil +} + +func pathToAttributePath(path cty.Path) *proto.AttributePath { + var steps []*proto.AttributePath_Step + + for _, step := range path { + switch s := step.(type) { + case cty.GetAttrStep: + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: s.Name, + }, + }) + case cty.IndexStep: + ty := s.Key.Type() + switch ty { + case cty.Number: + i, _ := s.Key.AsBigFloat().Int64() + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: i, + }, + }) + case cty.String: + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: s.Key.AsString(), + }, + }) + } + } + } + + return &proto.AttributePath{Steps: steps} +} + +// helper/schema throws away timeout values from the config and stores them in +// the Private/Meta fields. We need to copy those values into the planned state +// so that core doesn't see a perpetual diff with the timeout block. +func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value { + // if `to` is null we are planning to remove it altogether. + if to.IsNull() { + return to + } + toAttrs := to.AsValueMap() + // We need to remove the key since the hcl2shims will add a non-null block + // because we can't determine if a single block was null from the flatmapped + // values. This needs to conform to the correct schema for marshaling, so + // change the value to null rather than deleting it from the object map.
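+ // Editor's sketch of the intended behavior, with hypothetical values: + // given to = {id: "a", timeouts: null} and from = {id: "a", timeouts: {create: "5m"}}, + // the result is {id: "a", timeouts: {create: "5m"}}; when from carries no + // known, non-null timeouts block, the null in to is preserved.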
+ timeouts, ok := toAttrs[schema.TimeoutsConfigKey] + if ok { + toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type()) + } + + // if from is null then there are no timeouts to copy + if from.IsNull() { + return cty.ObjectVal(toAttrs) + } + + fromAttrs := from.AsValueMap() + timeouts, ok = fromAttrs[schema.TimeoutsConfigKey] + + // timeouts shouldn't be unknown, but don't copy possibly invalid values either + if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() { + // no timeouts block to copy + return cty.ObjectVal(toAttrs) + } + + toAttrs[schema.TimeoutsConfigKey] = timeouts + + return cty.ObjectVal(toAttrs) +} + +// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all +// StateFuncs and CustomizeDiffs removed. This will be used during apply to +// create a diff from a planned state where the diff modifications have already +// been applied. +func stripResourceModifiers(r *schema.Resource) *schema.Resource { + if r == nil { + return nil + } + // start with a shallow copy + newResource := new(schema.Resource) + *newResource = *r + + newResource.CustomizeDiff = nil + newResource.Schema = map[string]*schema.Schema{} + + for k, s := range r.Schema { + newResource.Schema[k] = stripSchema(s) + } + + return newResource +} + +func stripSchema(s *schema.Schema) *schema.Schema { + if s == nil { + return nil + } + // start with a shallow copy + newSchema := new(schema.Schema) + *newSchema = *s + + newSchema.StateFunc = nil + + switch e := newSchema.Elem.(type) { + case *schema.Schema: + newSchema.Elem = stripSchema(e) + case *schema.Resource: + newSchema.Elem = stripResourceModifiers(e) + } + + return newSchema +} + +// Zero values and empty containers may be interchanged by the apply process. +// When there is a discrepancy between src and dst value being null or empty, +// prefer the src value. This takes a little more liberty with set types, since +// we can't correlate modified set values. In the case of sets, if the src set +// was wholly known we assume the value was correctly applied and copy that +// entirely to the new value. +// While apply prefers the src value, during plan we prefer dst whenever there +// is an unknown or a set is involved, since the plan can alter the value +// however it sees fit. This however means that a CustomizeDiffFunction may not +// be able to change a null to an empty value or vice versa, but that should be +// very uncommon, and it was not reliable before 0.12 either. +func normalizeNullValues(dst, src cty.Value, preferDst bool) cty.Value { + ty := dst.Type() + + if !src.IsNull() && !src.IsKnown() { + // While this seems backwards to return src when preferDst is set, it + // means this might be a plan scenario, and it must retain unknown + // interpolated placeholders, which could be lost if we're only updating + // a resource. If this is a read scenario, then there shouldn't be any + // unknowns at all.
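+ // Editor's illustration with cty values (not part of the vendored code): + // after apply, dst may be cty.ListValEmpty(cty.String) while src is + // cty.NullVal(cty.List(cty.String)); normalizeNullValues(dst, src, false) + // returns the null src so that empty and null are not reported as a + // spurious change.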
+ if dst.IsNull() && preferDst { + return src + } + return dst + } + + // handle null/empty changes for collections + if ty.IsCollectionType() { + if src.IsNull() && !dst.IsNull() && dst.IsKnown() { + if dst.LengthInt() == 0 { + return src + } + } + + if dst.IsNull() && !src.IsNull() && src.IsKnown() { + if src.LengthInt() == 0 { + return src + } + } + } + + if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { + return dst + } + + switch { + case ty.IsMapType(), ty.IsObjectType(): + var dstMap map[string]cty.Value + if !dst.IsNull() { + dstMap = dst.AsValueMap() + } + if dstMap == nil { + dstMap = map[string]cty.Value{} + } + + srcMap := src.AsValueMap() + for key, v := range srcMap { + dstVal := dstMap[key] + if dstVal == cty.NilVal { + if preferDst && ty.IsMapType() { + // let plan shape this map however it wants + continue + } + dstVal = cty.NullVal(v.Type()) + } + dstMap[key] = normalizeNullValues(dstVal, v, preferDst) + } + + // you can't call MapVal/ObjectVal with empty maps, but nothing was + // copied in anyway. If the dst is nil, and the src is known, assume the + // src is correct. + if len(dstMap) == 0 { + if dst.IsNull() && src.IsWhollyKnown() && !preferDst { + return src + } + return dst + } + + if ty.IsMapType() { + // helper/schema will populate an optional+computed map with + // unknowns which we have to fixup here. + // It would be preferable to simply prevent any known value from + // becoming unknown, but concessions have to be made to retain the + // broken legacy behavior when possible. + for k, srcVal := range srcMap { + if !srcVal.IsNull() && srcVal.IsKnown() { + dstVal, ok := dstMap[k] + if !ok { + continue + } + + if !dstVal.IsNull() && !dstVal.IsKnown() { + dstMap[k] = srcVal + } + } + } + + return cty.MapVal(dstMap) + } + + return cty.ObjectVal(dstMap) + + case ty.IsSetType(): + // If the original was wholly known, then we expect that is what the + // provider applied. The apply process loses too much information to + // reliably re-create the set. + if src.IsWhollyKnown() && !preferDst { + return src + } + + case ty.IsListType(), ty.IsTupleType(): + // If the dst is null, and the src is known, then we lost an empty value + // so take the original. + if dst.IsNull() { + if src.IsWhollyKnown() && src.LengthInt() == 0 && !preferDst { + return src + } + + // if dst is null and src only contains unknown values, then we lost + // those during a plan (which is when preferDst is set, there would + // be no unknowns during read). + if preferDst && !src.IsNull() { + allUnknown := true + for _, v := range src.AsValueSlice() { + if v.IsKnown() { + allUnknown = false + break + } + } + if allUnknown { + return src + } + } + + return dst + } + + // if the lengths are identical, then iterate over each element in succession. + srcLen := src.LengthInt() + dstLen := dst.LengthInt() + if srcLen == dstLen && srcLen > 0 { + srcs := src.AsValueSlice() + dsts := dst.AsValueSlice() + + for i := 0; i < srcLen; i++ { + dsts[i] = normalizeNullValues(dsts[i], srcs[i], preferDst) + } + + if ty.IsTupleType() { + return cty.TupleVal(dsts) + } + return cty.ListVal(dsts) + } + + case ty.IsPrimitiveType(): + if dst.IsNull() && src.IsWhollyKnown() && !preferDst { + return src + } + } + + return dst +} + +// validateConfigNulls checks a config value for unsupported nulls before +// attempting to shim the value. 
While null values can mostly be ignored in the +// configuration, since they're not supported in HCL1, the case where a null +// appears in a list-like attribute (list, set, tuple) will present a nil value +// to helper/schema which can panic. Return an error to the user in this case, +// indicating the attribute with the null value. +func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic { + var diags []*proto.Diagnostic + if v.IsNull() || !v.IsKnown() { + return diags + } + + switch { + case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + if ev.IsNull() { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "Null value found in list", + Detail: "Null values are not allowed for this attribute value.", + Attribute: convert.PathToAttributePath(append(path, cty.IndexStep{Key: kv})), + }) + continue + } + + d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) + diags = convert.AppendProtoDiag(diags, d) + } + + case v.Type().IsMapType() || v.Type().IsObjectType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + var step cty.PathStep + switch { + case v.Type().IsMapType(): + step = cty.IndexStep{Key: kv} + case v.Type().IsObjectType(): + step = cty.GetAttrStep{Name: kv.AsString()} + } + d := validateConfigNulls(ev, append(path, step)) + diags = convert.AppendProtoDiag(diags, d) + } + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go new file mode 100644 index 0000000000..14494e462d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go @@ -0,0 +1,147 @@ +package plugin + +import ( + "log" + + "github.com/hashicorp/terraform/helper/schema" + proto "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/plugin/convert" + "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" + ctyconvert "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/msgpack" + context "golang.org/x/net/context" +) + +// NewGRPCProvisionerServerShim wraps a terraform.ResourceProvisioner in a +// proto.ProvisionerServer implementation. 
If the provided provisioner is not a +// *schema.Provisioner, this will return nil. +func NewGRPCProvisionerServerShim(p terraform.ResourceProvisioner) *GRPCProvisionerServer { + sp, ok := p.(*schema.Provisioner) + if !ok { + return nil + } + return &GRPCProvisionerServer{ + provisioner: sp, + } +} + +type GRPCProvisionerServer struct { + provisioner *schema.Provisioner +} + +func (s *GRPCProvisionerServer) GetSchema(_ context.Context, req *proto.GetProvisionerSchema_Request) (*proto.GetProvisionerSchema_Response, error) { + resp := &proto.GetProvisionerSchema_Response{} + + resp.Provisioner = &proto.Schema{ + Block: convert.ConfigSchemaToProto(schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()), + } + + return resp, nil +} + +func (s *GRPCProvisionerServer) ValidateProvisionerConfig(_ context.Context, req *proto.ValidateProvisionerConfig_Request) (*proto.ValidateProvisionerConfig_Response, error) { + resp := &proto.ValidateProvisionerConfig_Response{} + + cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema() + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, cfgSchema) + + warns, errs := s.provisioner.Validate(config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + return resp, nil +} + +// stringMapFromValue converts a cty.Value to a map[string]string. +// This will panic if the val is not a cty.Map(cty.String). +func stringMapFromValue(val cty.Value) map[string]string { + m := map[string]string{} + if val.IsNull() || !val.IsKnown() { + return m + } + + for it := val.ElementIterator(); it.Next(); { + ak, av := it.Element() + name := ak.AsString() + + if !av.IsKnown() || av.IsNull() { + continue + } + + av, _ = ctyconvert.Convert(av, cty.String) + m[name] = av.AsString() + } + + return m +} + +// uiOutput implements the terraform.UIOutput interface to adapt the grpc +// stream to the legacy Provisioner.Apply method. +type uiOutput struct { + srv proto.Provisioner_ProvisionResourceServer +} + +func (o uiOutput) Output(s string) { + err := o.srv.Send(&proto.ProvisionResource_Response{ + Output: s, + }) + if err != nil { + log.Printf("[ERROR] %s", err) + } +} + +func (s *GRPCProvisionerServer) ProvisionResource(req *proto.ProvisionResource_Request, srv proto.Provisioner_ProvisionResourceServer) error { + // We send back diagnostics over the stream if there was a + // provisioner-side problem.
+ srvResp := &proto.ProvisionResource_Response{} + + cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema() + cfgVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType()) + if err != nil { + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) + srv.Send(srvResp) + return nil + } + resourceConfig := terraform.NewResourceConfigShimmed(cfgVal, cfgSchema) + + connVal, err := msgpack.Unmarshal(req.Connection.Msgpack, cty.Map(cty.String)) + if err != nil { + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) + srv.Send(srvResp) + return nil + } + + conn := stringMapFromValue(connVal) + + instanceState := &terraform.InstanceState{ + Ephemeral: terraform.EphemeralState{ + ConnInfo: conn, + }, + Meta: make(map[string]interface{}), + } + + err = s.provisioner.Apply(uiOutput{srv}, instanceState, resourceConfig) + if err != nil { + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) + srv.Send(srvResp) + } + return nil +} + +func (s *GRPCProvisionerServer) Stop(_ context.Context, req *proto.Stop_Request) (*proto.Stop_Response, error) { + resp := &proto.Stop_Response{} + + err := s.provisioner.Stop() + if err != nil { + resp.Error = err.Error() + } + + return resp, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go b/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go new file mode 100644 index 0000000000..64a6784e8b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go @@ -0,0 +1,131 @@ +package plugin + +import ( + "fmt" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// SetUnknowns takes a cty.Value and compares it to the schema, setting any +// null values which are computed to unknown. +func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value { + if !val.IsKnown() { + return val + } + + // If the object was null, we still need to handle the top level attributes + // which might be computed, but we don't need to expand the blocks. + if val.IsNull() { + objMap := map[string]cty.Value{} + allNull := true + for name, attr := range schema.Attributes { + switch { + case attr.Computed: + objMap[name] = cty.UnknownVal(attr.Type) + allNull = false + default: + objMap[name] = cty.NullVal(attr.Type) + } + } + + // If this object has no unknown attributes, then we can leave it null. + if allNull { + return val + } + + return cty.ObjectVal(objMap) + } + + valMap := val.AsValueMap() + newVals := make(map[string]cty.Value) + + for name, attr := range schema.Attributes { + v := valMap[name] + + if attr.Computed && v.IsNull() { + newVals[name] = cty.UnknownVal(attr.Type) + continue + } + + newVals[name] = v + } + + for name, blockS := range schema.BlockTypes { + blockVal := valMap[name] + if blockVal.IsNull() || !blockVal.IsKnown() { + newVals[name] = blockVal + continue + } + + blockValType := blockVal.Type() + blockElementType := blockS.Block.ImpliedType() + + // This switches on the value type here, so we can correctly switch + // between Tuples/Lists and Maps/Objects.
+ switch { + case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup: + // NestingSingle is the only exception here, where we treat the + // block directly as an object + newVals[name] = SetUnknowns(blockVal, &blockS.Block) + + case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType(): + listVals := blockVal.AsValueSlice() + newListVals := make([]cty.Value, 0, len(listVals)) + + for _, v := range listVals { + newListVals = append(newListVals, SetUnknowns(v, &blockS.Block)) + } + + switch { + case blockValType.IsSetType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.SetValEmpty(blockElementType) + default: + newVals[name] = cty.SetVal(newListVals) + } + case blockValType.IsListType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.ListValEmpty(blockElementType) + default: + newVals[name] = cty.ListVal(newListVals) + } + case blockValType.IsTupleType(): + newVals[name] = cty.TupleVal(newListVals) + } + + case blockValType.IsMapType(), blockValType.IsObjectType(): + mapVals := blockVal.AsValueMap() + newMapVals := make(map[string]cty.Value) + + for k, v := range mapVals { + newMapVals[k] = SetUnknowns(v, &blockS.Block) + } + + switch { + case blockValType.IsMapType(): + switch len(newMapVals) { + case 0: + newVals[name] = cty.MapValEmpty(blockElementType) + default: + newVals[name] = cty.MapVal(newMapVals) + } + case blockValType.IsObjectType(): + if len(newMapVals) == 0 { + // We need to populate empty values to make a valid object. + for attr, ty := range blockElementType.AttributeTypes() { + newMapVals[attr] = cty.NullVal(ty) + } + } + newVals[name] = cty.ObjectVal(newMapVals) + } + + default: + panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType)) + } + } + + return cty.ObjectVal(newVals) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go new file mode 100644 index 0000000000..8cfa8e7f59 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go @@ -0,0 +1,43 @@ +package resource + +import ( + "context" + "net" + "time" + + "github.com/hashicorp/terraform/helper/plugin" + proto "github.com/hashicorp/terraform/internal/tfplugin5" + tfplugin "github.com/hashicorp/terraform/plugin" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC +// shim and starts it in a grpc server using an inmem connection. It returns a +// GRPCClient for this new server to test the shimmed resource provider. 
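+// Editor's usage sketch (assumed, not part of the vendored code), for a +// legacy *schema.Provider value p: +// tp := GRPCTestProvider(p) // providers.Interface over an in-memory gRPC conn +// The resulting interface is what the test harness hands to terraform core +// through a provider resolver.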
+func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface { + listener := bufconn.Listen(256 * 1024) + grpcServer := grpc.NewServer() + + p := plugin.NewGRPCProviderServerShim(rp) + proto.RegisterProviderServer(grpcServer, p) + + go grpcServer.Serve(listener) + + conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { + return listener.Dial() + }), grpc.WithInsecure()) + if err != nil { + panic(err) + } + + var pp tfplugin.GRPCProviderPlugin + client, _ := pp.GRPCClient(context.Background(), nil, conn) + + grpcClient := client.(*tfplugin.GRPCProvider) + grpcClient.TestListener = listener + + return grpcClient +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go index c34e21b25c..88a839664c 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/state.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go @@ -38,7 +38,7 @@ type StateChangeConf struct { // specified in the configuration using the specified Refresh() func, // waiting the number of seconds specified in the timeout configuration. // -// If the Refresh function returns a error, exit immediately with that error. +// If the Refresh function returns an error, exit immediately with that error. // // If the Refresh function returns a state other than the Target state or one // listed in Pending, return immediately with an error. diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go new file mode 100644 index 0000000000..b2aff99d10 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go @@ -0,0 +1,163 @@ +package resource + +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" +) + +// shimNewState takes a new *states.State and reverts it to a legacy state for the provider ACC tests +func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) { + state := terraform.NewState() + + // in the odd case of a nil state, let the helper packages handle it + if newState == nil { + return nil, nil + } + + for _, newMod := range newState.Modules { + mod := state.AddModule(newMod.Addr) + + for name, out := range newMod.OutputValues { + outputType := "" + val := hcl2shim.ConfigValueFromHCL2(out.Value) + ty := out.Value.Type() + switch { + case ty == cty.String: + outputType = "string" + case ty.IsTupleType() || ty.IsListType(): + outputType = "list" + case ty.IsMapType(): + outputType = "map" + } + + mod.Outputs[name] = &terraform.OutputState{ + Type: outputType, + Value: val, + Sensitive: out.Sensitive, + } + } + + for _, res := range newMod.Resources { + resType := res.Addr.Type + providerType := res.ProviderConfig.ProviderConfig.Type + + resource := getResource(providers, providerType, res.Addr) + + for key, i := range res.Instances { + flatmap, err := shimmedAttributes(i.Current, resource) + if err != nil { + return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) + } + + resState := &terraform.ResourceState{ + Type: resType, + Primary: &terraform.InstanceState{ + ID: flatmap["id"], + Attributes: flatmap, + Tainted: i.Current.Status ==
states.ObjectTainted, + }, + Provider: res.ProviderConfig.String(), + } + if i.Current.SchemaVersion != 0 { + resState.Primary.Meta = map[string]interface{}{ + "schema_version": i.Current.SchemaVersion, + } + } + + for _, dep := range i.Current.Dependencies { + resState.Dependencies = append(resState.Dependencies, dep.String()) + } + + // convert the indexes to the old style flatmap indexes + idx := "" + switch key.(type) { + case addrs.IntKey: + // don't add numeric index values to resources with only a single instance + if len(res.Instances) > 1 { + idx = fmt.Sprintf(".%d", key) + } + case addrs.StringKey: + idx = "." + key.String() + } + + mod.Resources[res.Addr.String()+idx] = resState + + // add any deposed instances + for _, dep := range i.Deposed { + flatmap, err := shimmedAttributes(dep, resource) + if err != nil { + return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) + } + + deposed := &terraform.InstanceState{ + ID: flatmap["id"], + Attributes: flatmap, + Tainted: dep.Status == states.ObjectTainted, + } + if dep.SchemaVersion != 0 { + deposed.Meta = map[string]interface{}{ + "schema_version": dep.SchemaVersion, + } + } + + resState.Deposed = append(resState.Deposed, deposed) + } + } + } + } + + return state, nil +} + +func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource { + p := providers[providerName] + if p == nil { + panic(fmt.Sprintf("provider %q not found in test step", providerName)) + } + + // this is only for tests, so we should only see *schema.Provider values here + provider := p.(*schema.Provider) + + switch addr.Mode { + case addrs.ManagedResourceMode: + resource := provider.ResourcesMap[addr.Type] + if resource != nil { + return resource + } + case addrs.DataResourceMode: + resource := provider.DataSourcesMap[addr.Type] + if resource != nil { + return resource + } + } + + panic(fmt.Sprintf("resource %s not found in test step", addr.Type)) +} + +func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) { + flatmap := instance.AttrsFlat + if flatmap != nil { + return flatmap, nil + } + + // if we have json attrs, they need to be decoded + rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType()) + if err != nil { + return nil, err + } + + instanceState, err := res.ShimInstanceStateFromValue(rio.Value) + if err != nil { + return nil, err + } + + return instanceState.Attributes, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go index b97673fdf5..aa7454d472 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go @@ -1,6 +1,7 @@ package resource import ( + "bytes" "flag" "fmt" "io" @@ -18,9 +19,18 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/go-multierror" "github.com/hashicorp/logutils" - "github.com/hashicorp/terraform/config/module" + "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" "github.com/hashicorp/terraform/helper/logging" + "github.com/hashicorp/terraform/internal/initwd" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/terraform" +
"github.com/hashicorp/terraform/tfdiags" ) // flagSweep is a flag available when running tests on the command line. It @@ -373,6 +383,10 @@ type TestStep struct { // be refreshed and don't matter. ImportStateVerify bool ImportStateVerifyIgnore []string + + // provider s is used internally to maintain a reference to the + // underlying providers during the tests + providers map[string]terraform.ResourceProvider } // Set to a file mask in sprintf format where %s is test name @@ -467,10 +481,22 @@ func Test(t TestT, c TestCase) { c.PreCheck() } + // get instances of all providers, so we can use the individual + // resources to shim the state during the tests. + providers := make(map[string]terraform.ResourceProvider) + for name, pf := range testProviderFactories(c) { + p, err := pf() + if err != nil { + t.Fatal(err) + } + providers[name] = p + } + providerResolver, err := testProviderResolver(c) if err != nil { t.Fatal(err) } + opts := terraform.ContextOpts{ProviderResolver: providerResolver} // A single state variable to track the lifecycle, starting with no state @@ -481,6 +507,10 @@ func Test(t TestT, c TestCase) { idRefresh := c.IDRefreshName != "" errored := false for i, step := range c.Steps { + // insert the providers into the step so we can get the resources for + // shimming the state + step.providers = providers + var err error log.Printf("[DEBUG] Test: Executing step %d", i) @@ -535,8 +565,7 @@ func Test(t TestT, c TestCase) { } } else { errored = true - t.Error(fmt.Sprintf( - "Step %d error: %s", i, err)) + t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err))) break } } @@ -591,6 +620,7 @@ func Test(t TestT, c TestCase) { Destroy: true, PreventDiskCleanup: lastStep.PreventDiskCleanup, PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, + providers: providers, } log.Printf("[WARN] Test: Executing destroy step") @@ -620,39 +650,50 @@ func testProviderConfig(c TestCase) string { return strings.Join(lines, "") } -// testProviderResolver is a helper to build a ResourceProviderResolver -// with pre instantiated ResourceProviders, so that we can reset them for the -// test, while only calling the factory function once. -// Any errors are stored so that they can be returned by the factory in -// terraform to match non-test behavior. -func testProviderResolver(c TestCase) (terraform.ResourceProviderResolver, error) { - ctxProviders := c.ProviderFactories - if ctxProviders == nil { - ctxProviders = make(map[string]terraform.ResourceProviderFactory) +// testProviderFactories combines the fixed Providers and +// ResourceProviderFactory functions into a single map of +// ResourceProviderFactory functions. +func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory { + ctxProviders := make(map[string]terraform.ResourceProviderFactory) + for k, pf := range c.ProviderFactories { + ctxProviders[k] = pf } // add any fixed providers for k, p := range c.Providers { ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) } + return ctxProviders +} + +// testProviderResolver is a helper to build a ResourceProviderResolver +// with pre instantiated ResourceProviders, so that we can reset them for the +// test, while only calling the factory function once. +// Any errors are stored so that they can be returned by the factory in +// terraform to match non-test behavior. 
+func testProviderResolver(c TestCase) (providers.Resolver, error) { + ctxProviders := testProviderFactories(c) + + // wrap the old provider factories in the test grpc server so they can be + // called from terraform. + newProviders := make(map[string]providers.Factory) - // reset the providers if needed for k, pf := range ctxProviders { - // we can ignore any errors here, if we don't have a provider to reset - // the error will be handled later - p, err := pf() - if err != nil { - return nil, err - } - if p, ok := p.(TestProvider); ok { - err := p.TestReset() + factory := pf // must copy to ensure each closure sees its own value + newProviders[k] = func() (providers.Interface, error) { + p, err := factory() if err != nil { - return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err) + return nil, err } + + // The provider is wrapped in a GRPCTestProvider so that it can be + // passed back to terraform core as a providers.Interface, rather + // than the legacy ResourceProvider. + return GRPCTestProvider(p), nil } } - return terraform.ResourceProviderResolverFixed(ctxProviders), nil + return providers.ResolverFixed(newProviders), nil } // UnitTest is a helper to force the acceptance testing harness to run in the @@ -670,33 +711,40 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r return nil } - name := fmt.Sprintf("%s.foo", r.Type) + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: r.Type, + Name: "foo", + }.Instance(addrs.NoKey) + absAddr := addr.Absolute(addrs.RootModuleInstance) // Build the state. The state is just the resource with an ID. There // are no attributes. We only set what is needed to perform a refresh. - state := terraform.NewState() - state.RootModule().Resources[name] = &terraform.ResourceState{ - Type: r.Type, - Primary: &terraform.InstanceState{ - ID: r.Primary.ID, + state := states.NewState() + state.RootModule().SetResourceInstanceCurrent( + addr, + &states.ResourceInstanceObjectSrc{ + AttrsFlat: r.Primary.Attributes, + Status: states.ObjectReady, }, - } + addrs.ProviderConfig{Type: "placeholder"}.Absolute(addrs.RootModuleInstance), + ) // Create the config module. We use the full config because Refresh // doesn't have access to it and we may need things like provider // configurations. The initial implementation of id-only checks used // an empty config module, but that caused the aforementioned problems. - mod, err := testModule(opts, step) + cfg, err := testConfig(opts, step) if err != nil { return err } // Initialize the context - opts.Module = mod + opts.Config = cfg opts.State = state - ctx, err := terraform.NewContext(&opts) - if err != nil { - return err + ctx, ctxDiags := terraform.NewContext(&opts) + if ctxDiags.HasErrors() { + return ctxDiags.Err() } if diags := ctx.Validate(); len(diags) > 0 { if diags.HasErrors() { @@ -707,20 +755,20 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r } // Refresh! - state, err = ctx.Refresh() - if err != nil { - return fmt.Errorf("Error refreshing: %s", err) + state, refreshDiags := ctx.Refresh() + if refreshDiags.HasErrors() { + return refreshDiags.Err() } // Verify attribute equivalence. 
- actualR := state.RootModule().Resources[name] + actualR := state.ResourceInstance(absAddr) if actualR == nil { return fmt.Errorf("Resource gone!") } - if actualR.Primary == nil { + if actualR.Current == nil { return fmt.Errorf("Resource has no primary instance") } - actual := actualR.Primary.Attributes + actual := actualR.Current.AttrsFlat expected := r.Primary.Attributes // Remove fields we're ignoring for _, v := range c.IDRefreshIgnore { @@ -756,15 +804,14 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r return nil } -func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error) { +func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) { if step.PreConfig != nil { step.PreConfig() } cfgPath, err := ioutil.TempDir("", "tf-test") if err != nil { - return nil, fmt.Errorf( - "Error creating temporary directory for config: %s", err) + return nil, fmt.Errorf("Error creating temporary directory for config: %s", err) } if step.PreventDiskCleanup { @@ -773,38 +820,38 @@ func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error) defer os.RemoveAll(cfgPath) } - // Write the configuration - cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf")) + // Write the main configuration file + err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm) if err != nil { - return nil, fmt.Errorf( - "Error creating temporary file for config: %s", err) + return nil, fmt.Errorf("Error creating temporary file for config: %s", err) } - _, err = io.Copy(cfgF, strings.NewReader(step.Config)) - cfgF.Close() + // Create directory for our child modules, if any. + modulesDir := filepath.Join(cfgPath, ".modules") + err = os.Mkdir(modulesDir, os.ModePerm) if err != nil { - return nil, fmt.Errorf( - "Error creating temporary file for config: %s", err) + return nil, fmt.Errorf("Error creating child modules directory: %s", err) } - // Parse the configuration - mod, err := module.NewTreeModule("", cfgPath) - if err != nil { - return nil, fmt.Errorf( - "Error loading configuration: %s", err) + inst := initwd.NewModuleInstaller(modulesDir, nil) + _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{}) + if installDiags.HasErrors() { + return nil, installDiags.Err() } - // Load the modules - modStorage := &module.Storage{ - StorageDir: filepath.Join(cfgPath, ".tfmodules"), - Mode: module.GetModeGet, - } - err = mod.Load(modStorage) + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: modulesDir, + }) if err != nil { - return nil, fmt.Errorf("Error downloading modules: %s", err) + return nil, fmt.Errorf("failed to create config loader: %s", err) + } + + config, configDiags := loader.LoadConfig(cfgPath) + if configDiags.HasErrors() { + return nil, configDiags } - return mod, nil + return config, nil } func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { @@ -881,8 +928,9 @@ func TestCheckResourceAttrSet(name, key string) TestCheckFunc { // TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with // support for non-root modules func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mp, name) + is, err := modulePathPrimaryInstanceState(s, mpt, name) if err != nil { return err } @@ -915,8 +963,9 @@ func 
TestCheckResourceAttr(name, key, value string) TestCheckFunc { // TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with // support for non-root modules func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mp, name) + is, err := modulePathPrimaryInstanceState(s, mpt, name) if err != nil { return err } @@ -926,7 +975,19 @@ func TestCheckModuleResourceAttr(mp []string, name string, key string, value str } func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { + // Empty containers may be elided from the state. + // If the intent here is to check for an empty container, allow the key to + // also be non-existent. + emptyCheck := false + if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + emptyCheck = true + } + if v, ok := is.Attributes[key]; !ok || v != value { + if emptyCheck && !ok { + return nil + } + if !ok { return fmt.Errorf("%s: Attribute '%s' not found", name, key) } @@ -957,8 +1018,9 @@ func TestCheckNoResourceAttr(name, key string) TestCheckFunc { // TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with // support for non-root modules func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mp, name) + is, err := modulePathPrimaryInstanceState(s, mpt, name) if err != nil { return err } @@ -968,7 +1030,20 @@ func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestChe } func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { - if _, ok := is.Attributes[key]; ok { + // Empty containers may sometimes be included in the state. + // If the intent here is to check for an empty container, allow the value to + // also be "0". 
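+ // Editor's note on the flatmap encoding assumed here: a list attribute + // "tags" is stored as "tags.#" = "2", "tags.0" = ..., and a map uses + // "tags.%" for its count; an empty container may appear either with a + // count of "0" or with the count key omitted entirely, so both spellings + // are treated as unset.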
+ emptyCheck := false + if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { + emptyCheck = true + } + + val, exists := is.Attributes[key] + if emptyCheck && val == "0" { + return nil + } + + if exists { return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) } @@ -991,8 +1066,9 @@ func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { // TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with // support for non-root modules func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mp, name) + is, err := modulePathPrimaryInstanceState(s, mpt, name) if err != nil { return err } @@ -1052,13 +1128,15 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string // TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with // support for non-root modules func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { + mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() + mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() return func(s *terraform.State) error { - isFirst, err := modulePathPrimaryInstanceState(s, mpFirst, nameFirst) + isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) if err != nil { return err } - isSecond, err := modulePathPrimaryInstanceState(s, mpSecond, nameSecond) + isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) if err != nil { return err } @@ -1068,14 +1146,32 @@ func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirs } func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { - vFirst, ok := isFirst.Attributes[keyFirst] - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst) + vFirst, okFirst := isFirst.Attributes[keyFirst] + vSecond, okSecond := isSecond.Attributes[keySecond] + + // Container count values of 0 are not reliably maintained by + // helper/schema, and should not be relied upon. For the purpose of tests, + // consider unset and 0 to be equal. + if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && + (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { + // they have the same suffix, and it is a collection count key. + if vFirst == "0" || vFirst == "" { + okFirst = false + } + if vSecond == "0" || vSecond == "" { + okSecond = false + } } - vSecond, ok := isSecond.Attributes[keySecond] - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) + if okFirst != okSecond { + if !okFirst { + return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) + } + return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) + } + if !(okFirst || okSecond) { + // If they both don't exist then they are equally unset, so that's okay.
+ return nil } if vFirst != vSecond { @@ -1163,7 +1259,7 @@ func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, n // modulePathPrimaryInstanceState returns the primary instance state for the // given resource name in a given module path. -func modulePathPrimaryInstanceState(s *terraform.State, mp []string, name string) (*terraform.InstanceState, error) { +func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { ms := s.ModuleByPath(mp) if ms == nil { return nil, fmt.Errorf("No module found at: %s", mp) @@ -1178,3 +1274,47 @@ func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceS ms := s.RootModule() return modulePrimaryInstanceState(s, ms, name) } + +// operationError is a specialized implementation of error used to describe +// failures during one of the several operations performed for a particular +// test case. +type operationError struct { + OpName string + Diags tfdiags.Diagnostics +} + +func newOperationError(opName string, diags tfdiags.Diagnostics) error { + return operationError{opName, diags} +} + +// Error returns a terse error string containing just the basic diagnostic +// messages, for situations where normal Go error behavior is appropriate. +func (err operationError) Error() string { + return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error()) +} + +// ErrorDetail is like Error except it includes verbosely-rendered diagnostics +// similar to what would come from a normal Terraform run, which include +// additional context not included in Error(). +func (err operationError) ErrorDetail() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "errors during %s:", err.OpName) + clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors} + for _, diag := range err.Diags { + diagStr := format.Diagnostic(diag, nil, clr, 78) + buf.WriteByte('\n') + buf.WriteString(diagStr) + } + return buf.String() +} + +// detailedErrorMessage is a helper for calling ErrorDetail on an error if +// it is an operationError or just taking Error otherwise. +func detailedErrorMessage(err error) string { + switch tErr := err.(type) { + case operationError: + return tErr.ErrorDetail() + default: + return err.Error() + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go index 033f1266d6..311fdb6ef5 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go @@ -1,13 +1,23 @@ package resource import ( + "bufio" + "bytes" "errors" "fmt" "log" + "sort" "strings" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // testStepConfig runs a config-mode test step @@ -18,69 +28,79 @@ func testStepConfig( return testStep(opts, state, step) } -func testStep( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - // Pre-taint any resources that have been defined in Taint, as long as this - // is not a destroy step. 
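+// Editor's note (illustrative): a TestStep with Taint: []string{"aws_instance.foo"} +// has that resource marked tainted in the prior state by testStepTaint below, +// so the apply in this step must destroy and recreate it.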
+func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { if !step.Destroy { if err := testStepTaint(state, step); err != nil { return state, err } } - mod, err := testModule(opts, step) + cfg, err := testConfig(opts, step) if err != nil { return state, err } + var stepDiags tfdiags.Diagnostics + // Build the context - opts.Module = mod - opts.State = state - opts.Destroy = step.Destroy - ctx, err := terraform.NewContext(&opts) + opts.Config = cfg + opts.State, err = terraform.ShimLegacyState(state) if err != nil { - return state, fmt.Errorf("Error initializing context: %s", err) + return nil, err + } + + opts.Destroy = step.Destroy + ctx, stepDiags := terraform.NewContext(&opts) + if stepDiags.HasErrors() { + return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err()) } - if diags := ctx.Validate(); len(diags) > 0 { - if diags.HasErrors() { - return nil, errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) + if stepDiags := ctx.Validate(); len(stepDiags) > 0 { + if stepDiags.HasErrors() { + return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) } - log.Printf("[WARN] Config warnings:\n%s", diags) + log.Printf("[WARN] Config warnings:\n%s", stepDiags) } // Refresh! - state, err = ctx.Refresh() + newState, stepDiags := ctx.Refresh() + // shim the state first so the test can check the state on errors + + state, err = shimNewState(newState, step.providers) if err != nil { - return state, fmt.Errorf( - "Error refreshing: %s", err) + return nil, err + } + if stepDiags.HasErrors() { + return state, newOperationError("refresh", stepDiags) } // If this step is a PlanOnly step, skip over this first Plan and subsequent // Apply, and use the follow up Plan that checks for perpetual diffs if !step.PlanOnly { // Plan! - if p, err := ctx.Plan(); err != nil { - return state, fmt.Errorf( - "Error planning: %s", err) + if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("plan", stepDiags) } else { - log.Printf("[WARN] Test: Step plan: %s", p) + log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes)) } // We need to keep a copy of the state prior to destroying - // such that destroy steps can verify their behaviour in the check + // such that destroy steps can verify their behavior in the check // function stateBeforeApplication := state.DeepCopy() - // Apply! - state, err = ctx.Apply() + // Apply the diff, creating real resources. + newState, stepDiags = ctx.Apply() + // shim the state first so the test can check the state on errors + state, err = shimNewState(newState, step.providers) if err != nil { - return state, fmt.Errorf("Error applying: %s", err) + return nil, err + } + if stepDiags.HasErrors() { + return state, newOperationError("apply", stepDiags) } - // Check! Excitement! + // Run any configured checks if step.Check != nil { if step.Destroy { if err := step.Check(stateBeforeApplication); err != nil { @@ -96,31 +116,35 @@ func testStep( // Now, verify that Plan is now empty and we don't have a perpetual diff issue // We do this with TWO plans. One without a refresh. 
- var p *terraform.Plan - if p, err = ctx.Plan(); err != nil { - return state, fmt.Errorf("Error on follow-up plan: %s", err) + var p *plans.Plan + if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("follow-up plan", stepDiags) } - if p.Diff != nil && !p.Diff.Empty() { + if !p.Changes.Empty() { if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) + log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) } else { return state, fmt.Errorf( - "After applying this step, the plan was not empty:\n\n%s", p) + "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) } } // And another after a Refresh. if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { - state, err = ctx.Refresh() + newState, stepDiags = ctx.Refresh() + if stepDiags.HasErrors() { + return state, newOperationError("follow-up refresh", stepDiags) + } + + state, err = shimNewState(newState, step.providers) if err != nil { - return state, fmt.Errorf( - "Error on follow-up refresh: %s", err) + return nil, err } } - if p, err = ctx.Plan(); err != nil { - return state, fmt.Errorf("Error on second follow-up plan: %s", err) + if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("second follow-up plan", stepDiags) } - empty := p.Diff == nil || p.Diff.Empty() + empty := p.Changes.Empty() // Data resources are tricky because they legitimately get instantiated // during refresh so that they will be already populated during the @@ -128,35 +152,28 @@ func testStep( // config we'll end up wanting to destroy them again here. This is // acceptable and expected, and we'll treat it as "empty" for the // sake of this testing. - if step.Destroy { + if step.Destroy && !empty { empty = true - - for _, moduleDiff := range p.Diff.Modules { - for k, instanceDiff := range moduleDiff.Resources { - if !strings.HasPrefix(k, "data.") { - empty = false - break - } - - if !instanceDiff.Destroy { - empty = false - } + for _, change := range p.Changes.Resources { + if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode { + empty = false + break } } } if !empty { if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) + log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) } else { return state, fmt.Errorf( "After applying this step and refreshing, "+ - "the plan was not empty:\n\n%s", p) + "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) } } // Made it here, but expected a non-empty plan, fail! - if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) { + if step.ExpectNonEmptyPlan && empty { return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") } @@ -164,6 +181,213 @@ func testStep( return state, nil } +// legacyPlanComparisonString produces a string representation of the changes +// from a plan and a given state together, as was formerly produced by the +// String method of terraform.Plan. +// +// This is here only for compatibility with existing tests that predate our +// new plan and state types, and should not be used in new tests. Instead, use +// a library like "cmp" to do a deep equality and diff on the two +// data structures.
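+// Editor's illustration of the output shape, for a hypothetical replaced +// resource (assumed names, not part of the vendored code): +// DIFF: +// DESTROY/CREATE: aws_instance.foo +// ami: "ami-1" => "ami-2" (forces new resource) +// STATE: +// (legacy state rendering follows)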
+func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { + return fmt.Sprintf( + "DIFF:\n\n%s\n\nSTATE:\n\n%s", + legacyDiffComparisonString(changes), + state.String(), + ) +} + +// legacyDiffComparisonString produces a string representation of the changes +// from a planned changes object, as was formerly produced by the String method +// of terraform.Diff. +// +// This is here only for compatibility with existing tests that predate our +// new plan types, and should not be used in new tests. Instead, use a library +// like "cmp" to do a deep equality check and diff on the two data structures. +func legacyDiffComparisonString(changes *plans.Changes) string { + // The old string representation of a plan was grouped by module, but + // our new plan structure is not grouped in that way and so we'll need + // to preprocess it in order to produce that grouping. + type ResourceChanges struct { + Current *plans.ResourceInstanceChangeSrc + Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc + } + byModule := map[string]map[string]*ResourceChanges{} + resourceKeys := map[string][]string{} + requiresReplace := map[string][]string{} + var moduleKeys []string + for _, rc := range changes.Resources { + if rc.Action == plans.NoOp { + // We won't mention no-op changes here at all, since the old plan + // model we are emulating here didn't have such a concept. + continue + } + moduleKey := rc.Addr.Module.String() + if _, exists := byModule[moduleKey]; !exists { + moduleKeys = append(moduleKeys, moduleKey) + byModule[moduleKey] = make(map[string]*ResourceChanges) + } + resourceKey := rc.Addr.Resource.String() + if _, exists := byModule[moduleKey][resourceKey]; !exists { + resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) + byModule[moduleKey][resourceKey] = &ResourceChanges{ + Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), + } + } + + if rc.DeposedKey == states.NotDeposed { + byModule[moduleKey][resourceKey].Current = rc + } else { + byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc + } + + rr := []string{} + for _, p := range rc.RequiredReplace.List() { + rr = append(rr, hcl2shim.FlatmapKeyFromPath(p)) + } + requiresReplace[resourceKey] = rr + } + sort.Strings(moduleKeys) + for _, ks := range resourceKeys { + sort.Strings(ks) + } + + var buf bytes.Buffer + + for _, moduleKey := range moduleKeys { + rcs := byModule[moduleKey] + var mBuf bytes.Buffer + + for _, resourceKey := range resourceKeys[moduleKey] { + rc := rcs[resourceKey] + + forceNewAttrs := requiresReplace[resourceKey] + + crud := "UPDATE" + if rc.Current != nil { + switch rc.Current.Action { + case plans.DeleteThenCreate: + crud = "DESTROY/CREATE" + case plans.CreateThenDelete: + crud = "CREATE/DESTROY" + case plans.Delete: + crud = "DESTROY" + case plans.Create: + crud = "CREATE" + } + } else { + // We must be working on a deposed object then, in which + // case destroying is the only possible action. 
+ crud = "DESTROY" + } + + extra := "" + if rc.Current == nil && len(rc.Deposed) > 0 { + extra = " (deposed only)" + } + + fmt.Fprintf( + &mBuf, "%s: %s%s\n", + crud, resourceKey, extra, + ) + + attrNames := map[string]bool{} + var oldAttrs map[string]string + var newAttrs map[string]string + if rc.Current != nil { + if before := rc.Current.Before; before != nil { + ty, err := before.ImpliedType() + if err == nil { + val, err := before.Decode(ty) + if err == nil { + oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range oldAttrs { + attrNames[k] = true + } + } + } + } + if after := rc.Current.After; after != nil { + ty, err := after.ImpliedType() + if err == nil { + val, err := after.Decode(ty) + if err == nil { + newAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range newAttrs { + attrNames[k] = true + } + } + } + } + } + if oldAttrs == nil { + oldAttrs = make(map[string]string) + } + if newAttrs == nil { + newAttrs = make(map[string]string) + } + + attrNamesOrder := make([]string, 0, len(attrNames)) + keyLen := 0 + for n := range attrNames { + attrNamesOrder = append(attrNamesOrder, n) + if len(n) > keyLen { + keyLen = len(n) + } + } + sort.Strings(attrNamesOrder) + + for _, attrK := range attrNamesOrder { + v := newAttrs[attrK] + u := oldAttrs[attrK] + + if v == config.UnknownVariableValue { + v = "" + } + // NOTE: we don't support here because we would + // need schema to do that. Excluding sensitive values + // is now done at the UI layer, and so should not be tested + // at the core layer. + + updateMsg := "" + + // This may not be as precise as in the old diff, as it matches + // everything under the attribute that was originally marked as + // ForceNew, but should help make it easier to determine what + // caused replacement here. + for _, k := range forceNewAttrs { + if strings.HasPrefix(attrK, k) { + updateMsg = " (forces new resource)" + break + } + } + + fmt.Fprintf( + &mBuf, " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, v, + updateMsg, + ) + } + } + + if moduleKey == "" { // root module + buf.Write(mBuf.Bytes()) + buf.WriteByte('\n') + continue + } + + fmt.Fprintf(&buf, "%s:\n", moduleKey) + s := bufio.NewScanner(&mBuf) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return buf.String() +} + func testStepTaint(state *terraform.State, step TestStep) error { for _, p := range step.Taint { m := state.RootModule() diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go index 94fef3cfbe..e1b7aea81a 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go @@ -7,6 +7,12 @@ import ( "strings" "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/terraform" ) @@ -15,6 +21,7 @@ func testStepImportState( opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { + // Determine the ID to import var importId string switch { @@ -41,33 +48,53 @@ func testStepImportState( // Setup the context. We initialize with an empty state. We use the // full config for provider configurations. 
- mod, err := testModule(opts, step) + cfg, err := testConfig(opts, step) if err != nil { return state, err } - opts.Module = mod - opts.State = terraform.NewState() - ctx, err := terraform.NewContext(&opts) - if err != nil { - return state, err + opts.Config = cfg + + // import tests start with empty state + opts.State = states.NewState() + + ctx, stepDiags := terraform.NewContext(&opts) + if stepDiags.HasErrors() { + return state, stepDiags.Err() } - // Do the import! - newState, err := ctx.Import(&terraform.ImportOpts{ + // The test step provides the resource address as a string, so we need + // to parse it to get an addrs.AbsResourceAddress to pass in to the + // import method. + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{}) + if hclDiags.HasErrors() { + return nil, hclDiags + } + importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal) + if stepDiags.HasErrors() { + return nil, stepDiags.Err() + } + + // Do the import + importedState, stepDiags := ctx.Import(&terraform.ImportOpts{ // Set the module so that any provider config is loaded - Module: mod, + Config: cfg, Targets: []*terraform.ImportTarget{ &terraform.ImportTarget{ - Addr: step.ResourceName, + Addr: importAddr, ID: importId, }, }, }) + if stepDiags.HasErrors() { + log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err()) + return state, stepDiags.Err() + } + + newState, err := shimNewState(importedState, step.providers) if err != nil { - log.Printf("[ERROR] Test: ImportState failure: %s", err) - return state, err + return nil, err } // Go through the new state and verify @@ -75,7 +102,9 @@ func testStepImportState( var states []*terraform.InstanceState for _, r := range newState.RootModule().Resources { if r.Primary != nil { - states = append(states, r.Primary) + is := r.Primary.DeepCopy() + is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type + states = append(states, is) } } if err := step.ImportStateCheck(states); err != nil { @@ -102,30 +131,84 @@ func testStepImportState( r.Primary.ID) } + // We'll try our best to find the schema for this resource type + // so we can ignore Removed fields during validation. If we fail + // to find the schema then we won't ignore them and so the test + // will need to rely on explicit ImportStateVerifyIgnore, though + // this shouldn't happen in any reasonable case. 
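// Aside: a minimal standalone sketch of the address parsing used above,
// assuming the terraform 0.12 "addrs" and hcl2 packages; "aws_instance.foo"
// stands in for any step.ResourceName.
package example

import (
	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"

	"github.com/hashicorp/terraform/addrs"
)

// parseImportAddr converts a resource address string into the
// addrs.AbsResourceInstance value that ctx.Import expects.
func parseImportAddr(name string) (addrs.AbsResourceInstance, error) {
	traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(name), "", hcl.Pos{})
	if hclDiags.HasErrors() {
		return addrs.AbsResourceInstance{}, hclDiags
	}
	addr, diags := addrs.ParseAbsResourceInstance(traversal)
	if diags.HasErrors() {
		return addrs.AbsResourceInstance{}, diags.Err()
	}
	return addr, nil
}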
+ var rsrcSchema *schema.Resource
+ if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() {
+ providerType := providerAddr.ProviderConfig.Type
+ if provider, ok := step.providers[providerType]; ok {
+ if provider, ok := provider.(*schema.Provider); ok {
+ rsrcSchema = provider.ResourcesMap[r.Type]
+ }
+ }
+ }
+
+ // don't add empty flatmapped containers, so we can more easily
+ // compare the attributes
+ skipEmpty := func(k, v string) bool {
+ if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") {
+ if v == "0" {
+ return true
+ }
+ }
+ return false
+ }
+
 // Compare their attributes
 actual := make(map[string]string)
 for k, v := range r.Primary.Attributes {
+ if skipEmpty(k, v) {
+ continue
+ }
 actual[k] = v
 }
+
 expected := make(map[string]string)
 for k, v := range oldR.Primary.Attributes {
+ if skipEmpty(k, v) {
+ continue
+ }
 expected[k] = v
 }
 
 // Remove fields we're ignoring
 for _, v := range step.ImportStateVerifyIgnore {
- for k, _ := range actual {
+ for k := range actual {
 if strings.HasPrefix(k, v) {
 delete(actual, k)
 }
 }
- for k, _ := range expected {
+ for k := range expected {
 if strings.HasPrefix(k, v) {
 delete(expected, k)
 }
 }
 }
 
+ // Also remove any attributes that are marked as "Removed" in the
+ // schema, if we have a schema to check that against.
+ if rsrcSchema != nil {
+ for k := range actual {
+ for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) {
+ if schema.Removed != "" {
+ delete(actual, k)
+ break
+ }
+ }
+ }
+ for k := range expected {
+ for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) {
+ if schema.Removed != "" {
+ delete(expected, k)
+ break
+ }
+ }
+ }
+ }
+
 if !reflect.DeepEqual(actual, expected) {
 // Determine only the different attributes
 for k, v := range expected {
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
index 57fbba7447..c8d8ae28c6 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
@@ -2,8 +2,15 @@ package schema
 
 import (
 "context"
+ "fmt"
 
+ "github.com/hashicorp/terraform/tfdiags"
+ "github.com/zclconf/go-cty/cty"
+
+ "github.com/hashicorp/terraform/config/hcl2shim"
+ "github.com/hashicorp/terraform/configs/configschema"
 "github.com/hashicorp/terraform/terraform"
+ ctyconvert "github.com/zclconf/go-cty/cty/convert"
 )
 
 // Backend represents a partial backend.Backend implementation and simplifies
@@ -38,41 +45,123 @@ func FromContextBackendConfig(ctx context.Context) *ResourceData {
 return ctx.Value(backendConfigKey).(*ResourceData)
 }
 
-func (b *Backend) Input(
- input terraform.UIInput,
- c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+func (b *Backend) ConfigSchema() *configschema.Block {
+ // This is an alias of CoreConfigSchema just to implement the
+ // backend.Backend interface.
+ return b.CoreConfigSchema()
+}
+
+func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) {
 if b == nil {
- return c, nil
+ return configVal, nil
 }
+ var diags tfdiags.Diagnostics
+ var err error
 
- return schemaMap(b.Schema).Input(input, c)
-}
+ // In order to use Transform below, this needs to be filled out completely
+ // according to the schema.
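// Aside: a hedged sketch of the flatmap convention behind skipEmpty (defined
// earlier in this hunk): container sizes live under ".#" (lists/sets) and
// ".%" (maps), so zero counts carry no information when comparing attribute
// maps. The attribute names here are hypothetical.
package example

import "strings"

// filterEmptyCounts drops zero-valued count keys, so that
// {"id": "1", "tags.%": "0"} and {"id": "1"} compare as equal.
func filterEmptyCounts(attrs map[string]string) map[string]string {
	out := make(map[string]string, len(attrs))
	for k, v := range attrs {
		if (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) && v == "0" {
			continue
		}
		out[k] = v
	}
	return out
}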
+ configVal, err = b.CoreConfigSchema().CoerceValue(configVal) + if err != nil { + return configVal, diags.Append(err) + } -func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) { - if b == nil { - return nil, nil + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. + configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := b.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return configVal, diags } - return schemaMap(b.Schema).Validate(c) + shimRC := b.shimConfig(configVal) + warns, errs := schemaMap(b.Schema).Validate(shimRC) + for _, warn := range warns { + diags = diags.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range errs { + diags = diags.Append(err) + } + return configVal, diags } -func (b *Backend) Configure(c *terraform.ResourceConfig) error { +func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics { if b == nil { return nil } + var diags tfdiags.Diagnostics sm := schemaMap(b.Schema) + shimRC := b.shimConfig(obj) // Get a ResourceData for this configuration. To do this, we actually // generate an intermediary "diff" although that is never exposed. 
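// Aside: the default-filling in PrepareConfig above relies on cty.Transform
// visiting every value along with its path. A minimal standalone sketch of
// that pattern (the "region" attribute and its default are hypothetical):
package example

import "github.com/zclconf/go-cty/cty"

func fillRegionDefault(obj cty.Value) (cty.Value, error) {
	return cty.Transform(obj, func(path cty.Path, val cty.Value) (cty.Value, error) {
		// only top-level attributes, and only when unset
		if len(path) != 1 || !val.IsNull() {
			return val, nil
		}
		if step, ok := path[0].(cty.GetAttrStep); ok && step.Name == "region" {
			return cty.StringVal("us-east-1"), nil
		}
		return val, nil
	})
}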
- diff, err := sm.Diff(nil, c, nil, nil) + diff, err := sm.Diff(nil, shimRC, nil, nil, true) if err != nil { - return err + diags = diags.Append(err) + return diags } data, err := sm.Data(nil, diff) if err != nil { - return err + diags = diags.Append(err) + return diags } b.config = data @@ -80,11 +169,28 @@ func (b *Backend) Configure(c *terraform.ResourceConfig) error { err = b.ConfigureFunc(context.WithValue( context.Background(), backendConfigKey, data)) if err != nil { - return err + diags = diags.Append(err) + return diags } } - return nil + return diags +} + +// shimConfig turns a new-style cty.Value configuration (which must be of +// an object type) into a minimal old-style *terraform.ResourceConfig object +// that should be populated enough to appease the not-yet-updated functionality +// in this package. This should be removed once everything is updated. +func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig { + shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{}) + if !ok { + // If the configVal was nil, we still want a non-nil map here. + shimMap = map[string]interface{}{} + } + return &terraform.ResourceConfig{ + Config: shimMap, + Raw: shimMap, + } } // Config returns the configuration. This is available after Configure is diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go index bf952f6631..d563ab8cab 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go @@ -3,7 +3,7 @@ package schema import ( "fmt" - "github.com/hashicorp/terraform/config/configschema" + "github.com/hashicorp/terraform/configs/configschema" "github.com/zclconf/go-cty/cty" ) @@ -39,14 +39,42 @@ func (m schemaMap) CoreConfigSchema() *configschema.Block { ret.Attributes[name] = schema.coreConfigSchemaAttribute() continue } - switch schema.Elem.(type) { - case *Schema: + if schema.Type == TypeMap { + // For TypeMap in particular, it isn't valid for Elem to be a + // *Resource (since that would be ambiguous in flatmap) and + // so Elem is treated as a TypeString schema if so. This matches + // how the field readers treat this situation, for compatibility + // with configurations targeting Terraform 0.11 and earlier. + if _, isResource := schema.Elem.(*Resource); isResource { + sch := *schema // shallow copy + sch.Elem = &Schema{ + Type: TypeString, + } + ret.Attributes[name] = sch.coreConfigSchemaAttribute() + continue + } + } + switch schema.ConfigMode { + case SchemaConfigModeAttr: ret.Attributes[name] = schema.coreConfigSchemaAttribute() - case *Resource: + case SchemaConfigModeBlock: ret.BlockTypes[name] = schema.coreConfigSchemaBlock() - default: - // Should never happen for a valid schema - panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem)) + default: // SchemaConfigModeAuto, or any other invalid value + if schema.Computed && !schema.Optional { + // Computed-only schemas are always handled as attributes, + // because they never appear in configuration. 
+ ret.Attributes[name] = schema.coreConfigSchemaAttribute()
+ continue
+ }
+ switch schema.Elem.(type) {
+ case *Schema, ValueType:
+ ret.Attributes[name] = schema.coreConfigSchemaAttribute()
+ case *Resource:
+ ret.BlockTypes[name] = schema.coreConfigSchemaBlock()
+ default:
+ // Should never happen for a valid schema
+ panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem))
+ }
 }
 }
 
@@ -58,12 +86,42 @@ func (m schemaMap) CoreConfigSchema() *configschema.Block {
 // Elem is an instance of Schema. Use coreConfigSchemaBlock for collections
 // whose elem is a whole resource.
 func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute {
+ // The Schema.DefaultFunc capability adds some extra weirdness here since
+ // it can be combined with "Required: true" to create a situation where
+ // required-ness is conditional. Terraform Core doesn't share this concept,
+ // so we must sniff for this possibility here and conditionally turn
+ // off the "Required" flag if it looks like the DefaultFunc is going
+ // to provide a value.
+ // This is not 100% true to the original interface of DefaultFunc but
+ // works well enough for the EnvDefaultFunc and MultiEnvDefaultFunc
+ // situations, which are the main cases we care about.
+ //
+ // Note that this also has a consequence for commands that return schema
+ // information for documentation purposes: running those for certain
+ // providers will produce different results depending on which environment
+ // variables are set. We accept that weirdness in order to keep this
+ // interface to core otherwise simple.
+ reqd := s.Required
+ opt := s.Optional
+ if reqd && s.DefaultFunc != nil {
+ v, err := s.DefaultFunc()
+ // We can't report errors from here, so we'll instead just force
+ // "Required" to false and let the provider try calling its
+ // DefaultFunc again during the validate step, where it can then
+ // return the error.
+ if err != nil || (err == nil && v != nil) {
+ reqd = false
+ opt = true
+ }
+ }
+
 return &configschema.Attribute{
- Type: s.coreConfigSchemaType(),
- Optional: s.Optional,
- Required: s.Required,
- Computed: s.Computed,
- Sensitive: s.Sensitive,
+ Type: s.coreConfigSchemaType(),
+ Optional: opt,
+ Required: reqd,
+ Computed: s.Computed,
+ Sensitive: s.Sensitive,
+ Description: s.Description,
 }
 }
 
@@ -72,7 +130,7 @@ func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute {
 // of Resource, and will panic otherwise.
 func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock {
 ret := &configschema.NestedBlock{}
- if nested := s.Elem.(*Resource).CoreConfigSchema(); nested != nil {
+ if nested := s.Elem.(*Resource).coreConfigSchema(); nested != nil {
 ret.Block = *nested
 }
 switch s.Type {
@@ -95,6 +153,20 @@ func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock {
 // blocks, but we can fake it by requiring at least one item.
 ret.MinItems = 1
 }
+ if s.Optional && s.MinItems > 0 {
+ // Historically helper/schema would ignore MinItems if Optional were
+ // set, so we must mimic this behavior here to ensure that providers
+ // relying on that undocumented behavior can continue to operate as
+ // they did before.
+ ret.MinItems = 0
+ }
+ if s.Computed && !s.Optional {
+ // MinItems/MaxItems are meaningless for computed nested blocks, since
+ // they are never set by the user anyway. This ensures that we'll never
+ // generate weird errors about them.
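// Aside: a sketch of the DefaultFunc sniffing described above. With a schema
// like this (hypothetical field and variable names), the attribute is
// advertised to Terraform Core as Required only when EXAMPLE_TOKEN is unset
// in the environment at schema-build time:
package example

import "github.com/hashicorp/terraform/helper/schema"

var tokenSchema = &schema.Schema{
	Type:        schema.TypeString,
	Required:    true,
	DefaultFunc: schema.EnvDefaultFunc("EXAMPLE_TOKEN", nil),
}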
+ ret.MinItems = 0 + ret.MaxItems = 0 + } return ret } @@ -102,6 +174,14 @@ func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { // coreConfigSchemaType determines the core config schema type that corresponds // to a particular schema's type. func (s *Schema) coreConfigSchemaType() cty.Type { + if s.SkipCoreTypeCheck { + // If we're preparing a schema for Terraform Core and the schema is + // asking us to skip the Core type-check then we'll tell core that this + // attribute is dynamically-typed, so it'll just pass through anything + // and let us validate it on the plugin side. + return cty.DynamicPseudoType + } + switch s.Type { case TypeString: return cty.String @@ -117,11 +197,16 @@ func (s *Schema) coreConfigSchemaType() cty.Type { switch set := s.Elem.(type) { case *Schema: elemType = set.coreConfigSchemaType() + case ValueType: + // This represents a mistake in the provider code, but it's a + // common one so we'll just shim it. + elemType = (&Schema{Type: set}).coreConfigSchemaType() case *Resource: - // In practice we don't actually use this for normal schema - // construction because we construct a NestedBlock in that - // case instead. See schemaMap.CoreConfigSchema. - elemType = set.CoreConfigSchema().ImpliedType() + // By default we construct a NestedBlock in this case, but this + // behavior is selected either for computed-only schemas or + // when ConfigMode is explicitly SchemaConfigModeBlock. + // See schemaMap.CoreConfigSchema for the exact rules. + elemType = set.coreConfigSchema().ImpliedType() default: if set != nil { // Should never happen for a valid schema @@ -148,8 +233,85 @@ func (s *Schema) coreConfigSchemaType() cty.Type { } } -// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema -// on the resource's schema. +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema on +// the resource's schema. CoreConfigSchema adds the implicitly required "id" +// attribute for top level resources if it doesn't exist. func (r *Resource) CoreConfigSchema() *configschema.Block { + block := r.coreConfigSchema() + + if block.Attributes == nil { + block.Attributes = map[string]*configschema.Attribute{} + } + + // Add the implicitly required "id" field if it doesn't exist + if block.Attributes["id"] == nil { + block.Attributes["id"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + Computed: true, + } + } + + _, timeoutsAttr := block.Attributes[TimeoutsConfigKey] + _, timeoutsBlock := block.BlockTypes[TimeoutsConfigKey] + + // Insert configured timeout values into the schema, as long as the schema + // didn't define anything else by that name. 
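// Aside: the ValueType case above tolerates a common provider mistake, shown
// here as a hedged sketch: Elem given as a bare ValueType instead of a
// *Schema. The shim treats it as if it were &Schema{Type: TypeString}.
package example

import "github.com/hashicorp/terraform/helper/schema"

var tagsSchema = &schema.Schema{
	Type:     schema.TypeMap,
	Optional: true,
	Elem:     schema.TypeString, // shimmed as &schema.Schema{Type: schema.TypeString}
}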
+ if r.Timeouts != nil && !timeoutsAttr && !timeoutsBlock {
+ timeouts := configschema.Block{
+ Attributes: map[string]*configschema.Attribute{},
+ }
+
+ if r.Timeouts.Create != nil {
+ timeouts.Attributes[TimeoutCreate] = &configschema.Attribute{
+ Type: cty.String,
+ Optional: true,
+ }
+ }
+
+ if r.Timeouts.Read != nil {
+ timeouts.Attributes[TimeoutRead] = &configschema.Attribute{
+ Type: cty.String,
+ Optional: true,
+ }
+ }
+
+ if r.Timeouts.Update != nil {
+ timeouts.Attributes[TimeoutUpdate] = &configschema.Attribute{
+ Type: cty.String,
+ Optional: true,
+ }
+ }
+
+ if r.Timeouts.Delete != nil {
+ timeouts.Attributes[TimeoutDelete] = &configschema.Attribute{
+ Type: cty.String,
+ Optional: true,
+ }
+ }
+
+ if r.Timeouts.Default != nil {
+ timeouts.Attributes[TimeoutDefault] = &configschema.Attribute{
+ Type: cty.String,
+ Optional: true,
+ }
+ }
+
+ block.BlockTypes[TimeoutsConfigKey] = &configschema.NestedBlock{
+ Nesting: configschema.NestingSingle,
+ Block: timeouts,
+ }
+ }
+
+ return block
+}
+
+func (r *Resource) coreConfigSchema() *configschema.Block {
+ return schemaMap(r.Schema).CoreConfigSchema()
+}
+
+// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema
+// on the backend's schema.
+func (r *Backend) CoreConfigSchema() *configschema.Block {
 return schemaMap(r.Schema).CoreConfigSchema()
 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
index b80b223a29..2a66a068fb 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
@@ -3,6 +3,7 @@ package schema
 import (
 "fmt"
 "strconv"
+ "strings"
 )
 
 // FieldReaders are responsible for decoding fields out of data into
@@ -41,6 +42,13 @@ func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} {
 return s.ZeroValue()
 }
 
+// SchemasForFlatmapPath tries its best to find a sequence of schemas that
+// the given dot-delimited attribute path traverses through.
+func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema {
+ parts := strings.Split(path, ".")
+ return addrToSchema(parts, schemaMap)
+}
+
 // addrToSchema finds the final element schema for the given address
 // and the given schema. It returns all the schemas that led to the final
 // schema. These are in order of the address (out to in).
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
index d558a5bc92..ae35b4a876 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -174,6 +174,9 @@ func (r *DiffFieldReader) readPrimitive(
 func (r *DiffFieldReader) readSet(
 address []string, schema *Schema) (FieldReadResult, error) {
+ // copy address to ensure we don't modify the argument
+ address = append([]string(nil), address...)
+
 prefix := strings.Join(address, ".") + "."
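// Aside: why readSet copies its address argument (a standalone illustration
// of Go slice aliasing, not code from this patch): appending into a slice's
// spare capacity writes through to the caller's backing array.
package example

import "fmt"

func main() {
	addr := make([]string, 2, 4) // spare capacity
	addr[0], addr[1] = "set", "1"

	_ = append(addr, "#") // may scribble "#" into addr's backing array

	safe := append([]string(nil), addr...) // defensive copy, as in readSet
	_ = append(safe, "#")                  // cannot touch addr's array
	fmt.Println(addr)
}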
// Create the set that will be our result diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go index 054efe08fd..53f73b71bb 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go @@ -98,6 +98,9 @@ func (r *MapFieldReader) readPrimitive( func (r *MapFieldReader) readSet( address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) + // Get the number of elements in the list countRaw, err := r.readPrimitive( append(address, "#"), &Schema{Type: TypeInt}) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go index 814c7ba8e3..c09358b1bb 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go @@ -297,13 +297,14 @@ func (w *MapFieldWriter) setSet( // we get the proper order back based on the hash code. if v := reflect.ValueOf(value); v.Kind() == reflect.Slice { // Build a temp *ResourceData to use for the conversion + tempAddr := addr[len(addr)-1:] tempSchema := *schema tempSchema.Type = TypeList - tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema} + tempSchemaMap := map[string]*Schema{tempAddr[0]: &tempSchema} tempW := &MapFieldWriter{Schema: tempSchemaMap} // Set the entire list, this lets us get sane values out of it - if err := tempW.WriteField(addr, value); err != nil { + if err := tempW.WriteField(tempAddr, value); err != nil { return err } @@ -319,7 +320,7 @@ func (w *MapFieldWriter) setSet( } for i := 0; i < v.Len(); i++ { is := strconv.FormatInt(int64(i), 10) - result, err := tempR.ReadField(append(addrCopy, is)) + result, err := tempR.ReadField(append(tempAddr, is)) if err != nil { return err } @@ -340,6 +341,11 @@ func (w *MapFieldWriter) setSet( // problems when the old data isn't wiped first. w.clearTree(addr) + if value.(*Set) == nil { + w.result[k+".#"] = "0" + return nil + } + for code, elem := range value.(*Set).m { if err := w.set(append(addrCopy, code), elem); err != nil { return err diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go index 6cd325daf4..97024479d8 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/configschema" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/terraform" ) @@ -64,6 +64,8 @@ type Provider struct { stopCtx context.Context stopCtxCancel context.CancelFunc stopOnce sync.Once + + TerraformVersion string } // ConfigureFunc is the function used to configure a Provider. @@ -251,7 +253,7 @@ func (p *Provider) Configure(c *terraform.ResourceConfig) error { // Get a ResourceData for this configuration. To do this, we actually // generate an intermediary "diff" although that is never exposed. 
- diff, err := sm.Diff(nil, c, nil, p.meta) + diff, err := sm.Diff(nil, c, nil, p.meta, true) if err != nil { return err } @@ -296,6 +298,20 @@ func (p *Provider) Diff( return r.Diff(s, c, p.meta) } +// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't +// attempt to calculate ignore_changes. +func (p *Provider) SimpleDiff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.simpleDiff(s, c, p.meta) +} + // Refresh implementation of terraform.ResourceProvider interface. func (p *Provider) Refresh( info *terraform.InstanceInfo, @@ -311,7 +327,7 @@ func (p *Provider) Refresh( // Resources implementation of terraform.ResourceProvider interface. func (p *Provider) Resources() []terraform.ResourceType { keys := make([]string, 0, len(p.ResourcesMap)) - for k, _ := range p.ResourcesMap { + for k := range p.ResourcesMap { keys = append(keys, k) } sort.Strings(keys) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go index a8d42dbd0b..637e221e1d 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/terraform" ) @@ -121,6 +122,11 @@ func (p *Provisioner) Stop() error { return nil } +// GetConfigSchema implementation of terraform.ResourceProvisioner interface. +func (p *Provisioner) GetConfigSchema() (*configschema.Block, error) { + return schemaMap(p.Schema).CoreConfigSchema(), nil +} + // Apply implementation of terraform.ResourceProvisioner interface. func (p *Provisioner) Apply( o terraform.UIOutput, @@ -146,7 +152,7 @@ func (p *Provisioner) Apply( } sm := schemaMap(p.ConnSchema) - diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil) + diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil, true) if err != nil { return err } @@ -160,7 +166,7 @@ func (p *Provisioner) Apply( // Build the configuration data. Doing this requires making a "diff" // even though that's never used. We use that just to get the correct types. configMap := schemaMap(p.Schema) - diff, err := configMap.Diff(nil, c, nil, nil) + diff, err := configMap.Diff(nil, c, nil, nil, true) if err != nil { return err } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go index d3be2d6148..b5e3065745 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" ) // Resource represents a thing in Terraform that has a set of configurable @@ -44,6 +45,12 @@ type Resource struct { // their Versioning at any integer >= 1 SchemaVersion int + // MigrateState is deprecated and any new changes to a resource's schema + // should be handled by StateUpgraders. Existing MigrateState implementations + // should remain for compatibility with existing state. 
MigrateState will
+ // still be called if the stored SchemaVersion is less than the
+ // first version of the StateUpgraders.
+ //
 // MigrateState is responsible for updating an InstanceState with an old
 // version to the format expected by the current version of the Schema.
 //
@@ -56,6 +63,18 @@ type Resource struct {
 // needs to make any remote API calls.
 MigrateState StateMigrateFunc
 
+ // StateUpgraders contains the functions responsible for upgrading an
+ // existing state with an old schema version to a newer schema. It is
+ // called specifically by Terraform when the stored schema version is less
+ // than the current SchemaVersion of the Resource.
+ //
+ // StateUpgraders map specific schema versions to a StateUpgrader
+ // function. The registered versions are expected to be ordered,
+ // consecutive values. The initial value may be greater than 0 to account
+ // for legacy schemas that weren't recorded and can be handled by
+ // MigrateState.
+ StateUpgraders []StateUpgrader
+
 // The functions below are the CRUD operations for this resource.
 //
 // The only optional operation is Update. If Update is not implemented,
@@ -136,6 +155,27 @@ type Resource struct {
 Timeouts *ResourceTimeout
 }
 
+// ShimInstanceStateFromValue converts a cty.Value to a
+// terraform.InstanceState.
+func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) {
+ // Get the raw shimmed value. While this is correct, the set hashes don't
+ // match those from the Schema.
+ s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion)
+
+ // We now rebuild the state through the ResourceData, so that the set indexes
+ // match what helper/schema expects.
+ data, err := schemaMap(r.Schema).Data(s, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ s = data.State()
+ if s == nil {
+ s = &terraform.InstanceState{}
+ }
+ return s, nil
+}
+
 // See Resource documentation.
 type CreateFunc func(*ResourceData, interface{}) error
 
@@ -155,6 +195,27 @@ type ExistsFunc func(*ResourceData, interface{}) (bool, error)
 type StateMigrateFunc func(
 int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
 
+type StateUpgrader struct {
+ // Version is the schema version that this Upgrader will handle, converting
+ // it to Version+1.
+ Version int
+
+ // Type describes the schema that this function can upgrade. Type is
+ // required to decode the schema if the state was stored in a legacy
+ // flatmap format.
+ Type cty.Type
+
+ // Upgrade takes the JSON encoded state and the provider meta value, and
+ // upgrades the state one single schema version. The provided state is
+ // decoded into the default json types using a map[string]interface{}. It
+ // is up to the StateUpgradeFunc to ensure that the returned value can be
+ // encoded using the new schema.
+ Upgrade StateUpgradeFunc
+}
+
+// See StateUpgrader
+type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error)
+
 // See Resource documentation.
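// Aside: a hedged sketch of wiring StateUpgraders into a Resource, using only
// the fields defined above. All identifiers and the rename from "name" to
// "display_name" are hypothetical; version 0 state is upgraded to version 1.
package example

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/zclconf/go-cty/cty"
)

func exampleResource() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		Schema: map[string]*schema.Schema{
			"display_name": {Type: schema.TypeString, Optional: true},
		},
		StateUpgraders: []schema.StateUpgrader{
			{
				Version: 0,
				// Type describes the version-0 schema, so legacy flatmap
				// state can be decoded before upgrading.
				Type: cty.Object(map[string]cty.Type{
					"id":   cty.String,
					"name": cty.String,
				}),
				Upgrade: func(raw map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
					raw["display_name"] = raw["name"]
					delete(raw, "name")
					return raw, nil
				},
			},
		},
	}
}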
type CustomizeDiffFunc func(*ResourceDiff, interface{}) error @@ -247,7 +308,7 @@ func (r *Resource) Diff( return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) } - instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta) + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true) if err != nil { return instanceDiff, err } @@ -263,6 +324,45 @@ func (r *Resource) Diff( return instanceDiff, err } +func (r *Resource) simpleDiff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + t := &ResourceTimeout{} + err := t.ConfigDecode(r, c) + + if err != nil { + return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) + } + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) + if err != nil { + return instanceDiff, err + } + + if instanceDiff == nil { + log.Printf("[DEBUG] Instance Diff is nil in SimpleDiff()") + return nil, err + } + + // Make sure the old value is set in each of the instance diffs. + // This was done by the RequiresNew logic in the full legacy Diff. + for k, attr := range instanceDiff.Attributes { + if attr == nil { + continue + } + if s != nil { + attr.Old = s.Attributes[k] + } + } + + if err := t.DiffEncode(instanceDiff); err != nil { + log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) + } + return instanceDiff, err +} + // Validate validates the resource configuration against the schema. func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { warns, errs := schemaMap(r.Schema).Validate(c) @@ -300,8 +400,11 @@ func (r *Resource) ReadDataApply( return r.recordCurrentSchemaVersion(state), err } -// Refresh refreshes the state of the resource. -func (r *Resource) Refresh( +// RefreshWithoutUpgrade reads the instance state, but does not call +// MigrateState or the StateUpgraders, since those are now invoked in a +// separate API call. +// RefreshWithoutUpgrade is part of the new plugin shims. +func (r *Resource) RefreshWithoutUpgrade( s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { // If the ID is already somehow blank, it doesn't exist @@ -335,12 +438,60 @@ func (r *Resource) Refresh( } } - needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) - if needsMigration && r.MigrateState != nil { - s, err := r.MigrateState(stateSchemaVersion, s, meta) + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + if err != nil { + return s, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), err +} + +// Refresh refreshes the state of the resource. +func (r *Resource) Refresh( + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. 
+ data, err := schemaMap(r.Schema).Data(s, nil)
+ data.timeouts = &rt
+ if err != nil {
 return s, err
 }
+
+ exists, err := r.Exists(data, meta)
+ if err != nil {
+ return s, err
+ }
+ if !exists {
+ return nil, nil
+ }
+ }
+
+ // there may be new StateUpgraders that need to be run
+ s, err := r.upgradeState(s, meta)
+ if err != nil {
+ return s, err
 }
 
 data, err := schemaMap(r.Schema).Data(s, nil)
@@ -358,6 +509,71 @@ func (r *Resource) Refresh(
 return r.recordCurrentSchemaVersion(state), err
 }
 
+func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
+ var err error
+
+ needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
+ migrate := needsMigration && r.MigrateState != nil
+
+ if migrate {
+ s, err = r.MigrateState(stateSchemaVersion, s, meta)
+ if err != nil {
+ return s, err
+ }
+ }
+
+ if len(r.StateUpgraders) == 0 {
+ return s, nil
+ }
+
+ // If we ran MigrateState, then the stateSchemaVersion value is no longer
+ // correct. We can expect the first upgrade function to be the correct
+ // schema type version.
+ if migrate {
+ stateSchemaVersion = r.StateUpgraders[0].Version
+ }
+
+ schemaType := r.CoreConfigSchema().ImpliedType()
+ // find the expected type to convert the state
+ for _, upgrader := range r.StateUpgraders {
+ if stateSchemaVersion == upgrader.Version {
+ schemaType = upgrader.Type
+ }
+ }
+
+ // StateUpgraders only operate on the new JSON format state, so the state
+ // needs to be converted.
+ stateVal, err := StateValueFromInstanceState(s, schemaType)
+ if err != nil {
+ return nil, err
+ }
+
+ jsonState, err := StateValueToJSONMap(stateVal, schemaType)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, upgrader := range r.StateUpgraders {
+ if stateSchemaVersion != upgrader.Version {
+ continue
+ }
+
+ jsonState, err = upgrader.Upgrade(jsonState, meta)
+ if err != nil {
+ return nil, err
+ }
+ stateSchemaVersion++
+ }
+
+ // now we need to re-flatmap the new state
+ stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema())
+ if err != nil {
+ return nil, err
+ }
+
+ return r.ShimInstanceStateFromValue(stateVal)
+}
+
 // InternalValidate should be called to validate the structure
 // of the resource.
 //
@@ -437,6 +653,31 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error
 }
 }
 
+ lastVersion := -1
+ for _, u := range r.StateUpgraders {
+ if lastVersion >= 0 && u.Version-lastVersion > 1 {
+ return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version)
+ }
+
+ if u.Version >= r.SchemaVersion {
+ return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion)
+ }
+
+ if !u.Type.IsObjectType() {
+ return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version)
+ }
+
+ if u.Upgrade == nil {
+ return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version)
+ }
+
+ lastVersion = u.Version
+ }
+
+ if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 {
+ return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion)
+ }
+
 // Data source
 if r.isTopLevel() && !writable {
 tsm = schemaMap(r.Schema)
@@ -513,6 +754,13 @@ func (r *Resource) TestResourceData() *ResourceData {
 }
 }
 
+// SchemasForFlatmapPath tries its best to find a sequence of schemas that
+// the given dot-delimited attribute path traverses through in the schema
+// of the receiving Resource.
+func (r *Resource) SchemasForFlatmapPath(path string) []*Schema { + return SchemasForFlatmapPath(path, r.Schema) +} + // Returns true if the resource is "top level" i.e. not a sub-resource. func (r *Resource) isTopLevel() bool { // TODO: This is a heuristic; replace with a definitive attribute? @@ -538,7 +786,15 @@ func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) { } stateSchemaVersion, _ := strconv.Atoi(rawString) - return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion + + // Don't run MigrateState if the version is handled by a StateUpgrader, + // since StateMigrateFuncs are not required to handle unknown versions + maxVersion := r.SchemaVersion + if len(r.StateUpgraders) > 0 { + maxVersion = r.StateUpgraders[0].Version + } + + return stateSchemaVersion < maxVersion, stateSchemaVersion } func (r *Resource) recordCurrentSchemaVersion( diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go index 6cc01ee0bd..1c390709e8 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go @@ -52,6 +52,8 @@ type getResult struct { // UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary // values, bypassing schema. This MUST NOT be used in normal circumstances - // it exists only to support the remote_state data source. +// +// Deprecated: Fully define schema attributes and use Set() instead. func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { d.once.Do(d.init) @@ -219,10 +221,16 @@ func (d *ResourceData) Id() string { if d.state != nil { result = d.state.ID + if result == "" { + result = d.state.Attributes["id"] + } } if d.newState != nil { result = d.newState.ID + if result == "" { + result = d.newState.Attributes["id"] + } } return result @@ -246,6 +254,18 @@ func (d *ResourceData) ConnInfo() map[string]string { func (d *ResourceData) SetId(v string) { d.once.Do(d.init) d.newState.ID = v + + // once we transition away from the legacy state types, "id" will no longer + // be a special field, and will become a normal attribute. + // set the attribute normally + d.setWriter.unsafeWriteField("id", v) + + // Make sure the newState is also set, otherwise the old value + // may get precedence. + if d.newState.Attributes == nil { + d.newState.Attributes = map[string]string{} + } + d.newState.Attributes["id"] = v } // SetConnInfo sets the connection info for a resource. diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go index 7db3decc5f..47b5481046 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go @@ -367,7 +367,7 @@ func (d *ResourceDiff) Get(key string) interface{} { } // GetChange gets the change between the state and diff, checking first to see -// if a overridden diff exists. +// if an overridden diff exists. 
//
// This implementation differs from ResourceData's in the way that we first get
// results from the exact levels for the new diff, then from state and diff as
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
index 445819f0f9..9e422c1a6f 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
@@ -5,6 +5,7 @@ import (
 "log"
 "time"
 
+ "github.com/hashicorp/terraform/config"
 "github.com/hashicorp/terraform/terraform"
 "github.com/mitchellh/copystructure"
 )
@@ -62,55 +63,70 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig)
 }
 
 if raw, ok := c.Config[TimeoutsConfigKey]; ok {
- if configTimeouts, ok := raw.([]map[string]interface{}); ok {
- for _, timeoutValues := range configTimeouts {
- // loop through each Timeout given in the configuration and validate they
- // the Timeout defined in the resource
- for timeKey, timeValue := range timeoutValues {
- // validate that we're dealing with the normal CRUD actions
- var found bool
- for _, key := range timeoutKeys() {
- if timeKey == key {
- found = true
- break
- }
- }
+ var rawTimeouts []map[string]interface{}
+ switch raw := raw.(type) {
+ case map[string]interface{}:
+ rawTimeouts = append(rawTimeouts, raw)
+ case []map[string]interface{}:
+ rawTimeouts = raw
+ case string:
+ if raw == config.UnknownVariableValue {
+ // Timeout is not defined in the config
+ // Defaults will be used instead
+ return nil
+ } else {
+ log.Printf("[ERROR] Invalid timeout value: %q", raw)
+ return fmt.Errorf("Invalid Timeout value found")
+ }
+ default:
+ log.Printf("[ERROR] Invalid timeout structure: %#v", raw)
+ return fmt.Errorf("Invalid Timeout structure found")
+ }
 
- if !found {
- return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
+ for _, timeoutValues := range rawTimeouts {
+ for timeKey, timeValue := range timeoutValues {
+ // validate that we're dealing with the normal CRUD actions
+ var found bool
+ for _, key := range timeoutKeys() {
+ if timeKey == key {
+ found = true
+ break
 }
+ }
 
- // Get timeout
- rt, err := time.ParseDuration(timeValue.(string))
- if err != nil {
- return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
- }
+ if !found {
+ return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
+ }
 
- var timeout *time.Duration
- switch timeKey {
- case TimeoutCreate:
- timeout = t.Create
- case TimeoutUpdate:
- timeout = t.Update
- case TimeoutRead:
- timeout = t.Read
- case TimeoutDelete:
- timeout = t.Delete
- case TimeoutDefault:
- timeout = t.Default
- }
+ // Get timeout
+ rt, err := time.ParseDuration(timeValue.(string))
+ if err != nil {
+ return fmt.Errorf("Error parsing %q timeout: %s", timeKey, err)
+ }
 
- // If the resource has not delcared this in the definition, then error
- // with an unsupported message
- if timeout == nil {
- return unsupportedTimeoutKeyError(timeKey)
- }
+ var timeout *time.Duration
+ switch timeKey {
+ case TimeoutCreate:
+ timeout = t.Create
+ case TimeoutUpdate:
+ timeout = t.Update
+ case TimeoutRead:
+ timeout = t.Read
+ case TimeoutDelete:
+ timeout = t.Delete
+ case TimeoutDefault:
+ timeout = t.Default
+ }
 
- *timeout = rt
+ // If the resource has not declared this in the definition, then error
+ // with an unsupported message
+ if timeout == nil {
+ return unsupportedTimeoutKeyError(timeKey)
 }
+
+ 
*timeout = rt
 }
- } else {
- log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
+ return nil
 }
 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
index 0ea5aad558..8cb9bc9f9d 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
@@ -12,6 +12,7 @@ package schema
 
 import (
+ "context"
 "fmt"
 "os"
 "reflect"
@@ -19,7 +20,9 @@ import (
 "sort"
 "strconv"
 "strings"
+ "sync"
 
+ "github.com/hashicorp/terraform/config"
 "github.com/hashicorp/terraform/terraform"
 "github.com/mitchellh/copystructure"
 "github.com/mitchellh/mapstructure"
@@ -31,6 +34,27 @@ const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR"
 // type used for schema package context keys
 type contextKey string
 
+var (
+ protoVersionMu sync.Mutex
+ protoVersion5 = false
+)
+
+func isProto5() bool {
+ protoVersionMu.Lock()
+ defer protoVersionMu.Unlock()
+ return protoVersion5
+}
+
+// SetProto5 enables a feature flag for any internal changes required
+// to work with the new plugin protocol. This should not be called by
+// providers.
+func SetProto5() {
+ protoVersionMu.Lock()
+ defer protoVersionMu.Unlock()
+ protoVersion5 = true
+}
+
 // Schema is used to describe the structure of a value.
 //
 // Read the documentation of the struct elements for important details.
@@ -51,6 +75,54 @@ type Schema struct {
 //
 Type ValueType
 
+ // ConfigMode allows for overriding the default behaviors for mapping
+ // schema entries onto configuration constructs.
+ //
+ // By default, the Elem field is used to choose whether a particular
+ // schema is represented in configuration as an attribute or as a nested
+ // block; if Elem is a *schema.Resource then it's a block and it's an
+ // attribute otherwise.
+ //
+ // If Elem is *schema.Resource then setting ConfigMode to
+ // SchemaConfigModeAttr will force it to be represented in configuration
+ // as an attribute, which means that the Computed flag can be used to
+ // provide default elements when the argument isn't set at all, while still
+ // allowing the user to force zero elements by explicitly assigning an
+ // empty list.
+ //
+ // When Computed is set without Optional, the attribute is not settable
+ // in configuration at all and so SchemaConfigModeAttr is the automatic
+ // behavior, and SchemaConfigModeBlock is not permitted.
+ ConfigMode SchemaConfigMode
+
+ // SkipCoreTypeCheck, if set, will advertise this attribute to Terraform Core
+ // as being dynamically-typed rather than deriving a type from the schema.
+ // This has the effect of making Terraform Core skip all type-checking of
+ // the value, and thus leaves all type checking up to a combination of this
+ // SDK and the provider's own code.
+ //
+ // This flag does nothing for Terraform versions prior to v0.12, because
+ // in prior versions there was no Core-side typecheck anyway.
+ //
+ // The most practical effect of this flag is to allow object-typed schemas
+ // (specified with Elem: schema.Resource) to pass through Terraform Core
+ // even without all of the object type attributes specified, which may be
+ // useful when using ConfigMode: SchemaConfigModeAttr to achieve
+ // nested-block-like behaviors while using attribute syntax.
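// Aside: a sketch of the pattern just described, with hypothetical field
// names: an object-typed attribute that looks like a nested block but, being
// an attribute, can be Computed and still explicitly set to an empty list.
package example

import "github.com/hashicorp/terraform/helper/schema"

var ruleSchema = &schema.Schema{
	Type:       schema.TypeList,
	ConfigMode: schema.SchemaConfigModeAttr,
	Optional:   true,
	Computed:   true,
	Elem: &schema.Resource{
		Schema: map[string]*schema.Schema{
			"port": {Type: schema.TypeInt, Optional: true},
		},
	},
}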
+ // + // However, by doing so we require type information to be sent and stored + // per-object rather than just once statically in the schema, and so this + // will change the wire serialization of a resource type in state. Changing + // the value of SkipCoreTypeCheck will therefore require a state migration + // if there has previously been any release of the provider compatible with + // Terraform v0.12. + // + // SkipCoreTypeCheck can only be set when ConfigMode is SchemaConfigModeAttr, + // because nested blocks cannot be decoded by Terraform Core without at + // least shallow information about the next level of nested attributes and + // blocks. + SkipCoreTypeCheck bool + // If one of these is set, then this item can come from the configuration. // Both cannot be set. If Optional is set, the value is optional. If // Required is set, the value is required. @@ -123,7 +195,8 @@ type Schema struct { // The following fields are only set for a TypeList, TypeSet, or TypeMap. // // Elem represents the element type. For a TypeMap, it must be a *Schema - // with a Type of TypeString, otherwise it may be either a *Schema or a + // with a Type that is one of the primitives: TypeString, TypeBool, + // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a // *Resource. If it is *Schema, the element type is just a simple value. // If it is *Resource, the element type is a complex structure, // potentially with its own lifecycle. @@ -141,13 +214,17 @@ type Schema struct { // used to wrap a complex structure, however less than one instance would // cause instability. // - // PromoteSingle, if true, will allow single elements to be standalone - // and promote them to a list. For example "foo" would be promoted to - // ["foo"] automatically. This is primarily for legacy reasons and the - // ambiguity is not recommended for new usage. Promotion is only allowed - // for primitive element types. - MaxItems int - MinItems int + // If the field Optional is set to true then MinItems is ignored and thus + // effectively zero. + MaxItems int + MinItems int + + // PromoteSingle originally allowed for a single element to be assigned + // where a primitive list was expected, but this no longer works from + // Terraform v0.12 onwards (Terraform Core will require a list to be set + // regardless of what this is set to) and so only applies to Terraform v0.11 + // and earlier, and so should be used only to retain this functionality + // for those still using v0.11 with a provider that formerly used this. PromoteSingle bool // The following fields are only valid for a TypeSet type. @@ -189,7 +266,8 @@ type Schema struct { // guaranteed to be of the proper Schema type, and it can yield warnings or // errors based on inspection of that value. // - // ValidateFunc currently only works for primitive types. + // ValidateFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. ValidateFunc SchemaValidateFunc // Sensitive ensures that the attribute's value does not get displayed in @@ -199,6 +277,17 @@ type Schema struct { Sensitive bool } +// SchemaConfigMode is used to influence how a schema item is mapped into a +// corresponding configuration construct, using the ConfigMode field of +// Schema. 
+type SchemaConfigMode int + +const ( + SchemaConfigModeAuto SchemaConfigMode = iota + SchemaConfigModeAttr + SchemaConfigModeBlock +) + // SchemaDiffSuppressFunc is a function which can be used to determine // whether a detected diff on a schema element is "valid" or not, and // suppress it from the plan if necessary. @@ -364,6 +453,11 @@ func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *t return d } +// InternalMap is used to aid in the transition to the new schema types and +// protocol. The name is not meant to convey any usefulness, as this is not to +// be used directly by any providers. +type InternalMap = schemaMap + // schemaMap is a wrapper that adds nice functions on top of schemas. type schemaMap map[string]*Schema @@ -404,7 +498,8 @@ func (m schemaMap) Diff( s *terraform.InstanceState, c *terraform.ResourceConfig, customizeDiff CustomizeDiffFunc, - meta interface{}) (*terraform.InstanceDiff, error) { + meta interface{}, + handleRequiresNew bool) (*terraform.InstanceDiff, error) { result := new(terraform.InstanceDiff) result.Attributes = make(map[string]*terraform.ResourceAttrDiff) @@ -450,82 +545,85 @@ func (m schemaMap) Diff( } } - // If the diff requires a new resource, then we recompute the diff - // so we have the complete new resource diff, and preserve the - // RequiresNew fields where necessary so the user knows exactly what - // caused that. - if result.RequiresNew() { - // Create the new diff - result2 := new(terraform.InstanceDiff) - result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - // Preserve the DestroyTainted flag - result2.DestroyTainted = result.DestroyTainted + if handleRequiresNew { + // If the diff requires a new resource, then we recompute the diff + // so we have the complete new resource diff, and preserve the + // RequiresNew fields where necessary so the user knows exactly what + // caused that. + if result.RequiresNew() { + // Create the new diff + result2 := new(terraform.InstanceDiff) + result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) - // Reset the data to not contain state. We have to call init() - // again in order to reset the FieldReaders. - d.state = nil - d.init() + // Preserve the DestroyTainted flag + result2.DestroyTainted = result.DestroyTainted - // Perform the diff again - for k, schema := range m { - err := m.diff(k, schema, result2, d, false) - if err != nil { - return nil, err - } - } + // Reset the data to not contain state. We have to call init() + // again in order to reset the FieldReaders. + d.state = nil + d.init() - // Re-run customization - if !result2.DestroyTainted && customizeDiff != nil { - mc := m.DeepCopy() - rd := newResourceDiff(mc, c, d.state, result2) - if err := customizeDiff(rd, meta); err != nil { - return nil, err - } - for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result2, rd, false) + // Perform the diff again + for k, schema := range m { + err := m.diff(k, schema, result2, d, false) if err != nil { return nil, err } } - } - // Force all the fields to not force a new since we know what we - // want to force new. 
- for k, attr := range result2.Attributes { - if attr == nil { - continue + // Re-run customization + if !result2.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, d.state, result2) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result2, rd, false) + if err != nil { + return nil, err + } + } } - if attr.RequiresNew { - attr.RequiresNew = false - } + // Force all the fields to not force a new since we know what we + // want to force new. + for k, attr := range result2.Attributes { + if attr == nil { + continue + } - if s != nil { - attr.Old = s.Attributes[k] - } - } + if attr.RequiresNew { + attr.RequiresNew = false + } - // Now copy in all the requires new diffs... - for k, attr := range result.Attributes { - if attr == nil { - continue + if s != nil { + attr.Old = s.Attributes[k] + } } - newAttr, ok := result2.Attributes[k] - if !ok { - newAttr = attr - } + // Now copy in all the requires new diffs... + for k, attr := range result.Attributes { + if attr == nil { + continue + } + + newAttr, ok := result2.Attributes[k] + if !ok { + newAttr = attr + } + + if attr.RequiresNew { + newAttr.RequiresNew = true + } - if attr.RequiresNew { - newAttr.RequiresNew = true + result2.Attributes[k] = newAttr } - result2.Attributes[k] = newAttr + // And set the diff! + result = result2 } - // And set the diff! - result = result2 } // Go through and detect all of the ComputedWhens now that we've @@ -611,6 +709,10 @@ func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { // from a unit test (and not in user-path code) to verify that a schema // is properly built. func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { + return m.internalValidate(topSchemaMap, false) +} + +func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { if topSchemaMap == nil { topSchemaMap = m } @@ -631,6 +733,42 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { return fmt.Errorf("%s: One of optional, required, or computed must be set", k) } + computedOnly := v.Computed && !v.Optional + + isBlock := false + + switch v.ConfigMode { + case SchemaConfigModeBlock: + if _, ok := v.Elem.(*Resource); !ok { + return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) + } + if attrsOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) + } + if computedOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) + } + isBlock = true + case SchemaConfigModeAttr: + // anything goes + case SchemaConfigModeAuto: + // Since "Auto" for Elem: *Resource would create a nested block, + // and that's impossible inside an attribute, we require it to be + // explicitly overridden as mode "Attr" for clarity. 
+ if _, ok := v.Elem.(*Resource); ok { + isBlock = true + if attrsOnly { + return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) + } + } + default: + return fmt.Errorf("%s: invalid ConfigMode value", k) + } + + if isBlock && v.SkipCoreTypeCheck { + return fmt.Errorf("%s: SkipCoreTypeCheck must be false unless ConfigMode is attribute", k) + } + if v.Computed && v.Default != nil { return fmt.Errorf("%s: Default must be nil if computed", k) } @@ -695,7 +833,9 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { switch t := v.Elem.(type) { case *Resource: - if err := t.InternalValidate(topSchemaMap, true); err != nil { + attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr + + if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil { return err } case *Schema: @@ -785,10 +925,19 @@ func (m schemaMap) diff( for attrK, attrV := range unsupressedDiff.Attributes { switch rd := d.(type) { case *ResourceData: - if schema.DiffSuppressFunc != nil && - attrV != nil && + if schema.DiffSuppressFunc != nil && attrV != nil && schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { - continue + // If this attr diff is suppressed, we may still need it in the + // overall diff if it's contained within a set. Rather than + // dropping the diff, make it a NOOP. + if !all { + continue + } + + attrV = &terraform.ResourceAttrDiff{ + Old: attrV.Old, + New: attrV.Old, + } } } diff.Attributes[attrK] = attrV @@ -1171,7 +1320,7 @@ func (m schemaMap) diffString( return fmt.Errorf("%s: %s", k, err) } - if os == ns && !all { + if os == ns && !all && !computed { // They're the same value. If there old value is not blank or we // have an ID, then return right away since we're already setup. if os != "" || d.Id() != "" { @@ -1179,7 +1328,7 @@ func (m schemaMap) diffString( } // Otherwise, only continue if we're computed - if !schema.Computed && !computed { + if !schema.Computed { return nil } } @@ -1210,7 +1359,7 @@ func (m schemaMap) inputString( input terraform.UIInput, k string, schema *Schema) (interface{}, error) { - result, err := input.Input(&terraform.InputOpts{ + result, err := input.Input(context.Background(), &terraform.InputOpts{ Id: k, Query: k, Description: schema.Description, @@ -1252,6 +1401,13 @@ func (m schemaMap) validate( "%q: this field cannot be set", k)} } + if raw == config.UnknownVariableValue { + // If the value is unknown then we can't validate it yet. + // In particular, this avoids spurious type errors where downstream + // validation code sees UnknownVariableValue as being just a string. + return nil, nil + } + err := m.validateConflictingAttributes(k, schema, c) if err != nil { return nil, []error{err} @@ -1269,10 +1425,15 @@ func (m schemaMap) validateConflictingAttributes( return nil } - for _, conflicting_key := range schema.ConflictsWith { - if _, ok := c.Get(conflicting_key); ok { + for _, conflictingKey := range schema.ConflictsWith { + if raw, ok := c.Get(conflictingKey); ok { + if raw == config.UnknownVariableValue { + // An unknown value might become unset (null) once known, so + // we must defer validation until it's known. 
+				continue
+			}
 			return fmt.Errorf(
-				"%q: conflicts with %s", k, conflicting_key)
+				"%q: conflicts with %s", k, conflictingKey)
 		}
 	}
@@ -1284,6 +1445,13 @@ func (m schemaMap) validateList(
 	raw interface{},
 	schema *Schema,
 	c *terraform.ResourceConfig) ([]string, []error) {
+	// first check if the list is wholly unknown
+	if s, ok := raw.(string); ok {
+		if s == config.UnknownVariableValue {
+			return nil, nil
+		}
+	}
+
 	// We use reflection to verify the slice because you can't
 	// case to []interface{} unless the slice is exactly that type.
 	rawV := reflect.ValueOf(raw)
@@ -1355,6 +1523,13 @@ func (m schemaMap) validateMap(
 	raw interface{},
 	schema *Schema,
 	c *terraform.ResourceConfig) ([]string, []error) {
+	// first check if the map is wholly unknown
+	if s, ok := raw.(string); ok {
+		if s == config.UnknownVariableValue {
+			return nil, nil
+		}
+	}
+
 	// We use reflection to verify the slice because you can't
 	// case to []interface{} unless the slice is exactly that type.
 	rawV := reflect.ValueOf(raw)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
index cba289035d..8ee89e4756 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/set.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
@@ -198,6 +198,16 @@ func (s *Set) add(item interface{}, computed bool) string {
 	code := s.hash(item)
 	if computed {
 		code = "~" + code
+
+		if isProto5() {
+			tmpCode := code
+			count := 0
+			for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] {
+				count++
+				tmpCode = fmt.Sprintf("%s%d", code, count)
+			}
+			code = tmpCode
+		}
 	}
 	if _, ok := s.m[code]; !ok {
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go
new file mode 100644
index 0000000000..dc840204b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go
@@ -0,0 +1,157 @@
+package schema
+
+import (
+	"encoding/json"
+
+	"github.com/zclconf/go-cty/cty"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+// DiffFromValues takes the current state and desired state as cty.Values and
+// derives a terraform.InstanceDiff to give to the legacy providers. This is
+// used to take the states provided by the new ApplyResourceChange method and
+// convert them to a state+diff required for the legacy Apply method.
+func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) {
+	return diffFromValues(prior, planned, res, nil)
+}
+
+// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our
+// test fixtures from the legacy tests. In the new provider protocol the diff
+// only needs to be created for the apply operation, and any customizations
+// have already been done.
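+//
+// A rough usage sketch of the exported wrapper (the caller-side variable
+// names here are hypothetical):
+//
+//	diff, err := DiffFromValues(priorVal, plannedVal, myResource)
+//	if err != nil {
+//		return err
+//	}
+//	// diff can then be fed to the legacy Apply codepath.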
+func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) {
+	instanceState, err := res.ShimInstanceStateFromValue(prior)
+	if err != nil {
+		return nil, err
+	}
+
+	configSchema := res.CoreConfigSchema()
+
+	cfg := terraform.NewResourceConfigShimmed(planned, configSchema)
+	removeConfigUnknowns(cfg.Config)
+	removeConfigUnknowns(cfg.Raw)
+
+	diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return diff, err
+}
+
+// During apply the only unknown values are those which are to be computed by
+// the resource itself. These may have been marked as unknown config values, and
+// need to be removed to prevent the UnknownVariableValue from appearing in the diff.
+func removeConfigUnknowns(cfg map[string]interface{}) {
+	for k, v := range cfg {
+		switch v := v.(type) {
+		case string:
+			if v == config.UnknownVariableValue {
+				cfg[k] = ""
+			}
+		case []interface{}:
+			for _, i := range v {
+				if m, ok := i.(map[string]interface{}); ok {
+					removeConfigUnknowns(m)
+				}
+			}
+		case map[string]interface{}:
+			removeConfigUnknowns(v)
+		}
+	}
+}
+
+// ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to
+// get a new cty.Value state. This is used to convert the diff returned from
+// the legacy provider Diff method to the state required for the new
+// PlanResourceChange method.
+func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) {
+	return d.ApplyToValue(base, schema)
+}
+
+// StateValueToJSONMap converts a cty.Value to a generic JSON map via the cty JSON
+// encoding.
+func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) {
+	js, err := ctyjson.Marshal(val, ty)
+	if err != nil {
+		return nil, err
+	}
+
+	var m map[string]interface{}
+	if err := json.Unmarshal(js, &m); err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
+
+// JSONMapToStateValue takes a generic json map[string]interface{} and converts it
+// to the specific type, ensuring that the values conform to the schema.
+func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) {
+	var val cty.Value
+
+	js, err := json.Marshal(m)
+	if err != nil {
+		return val, err
+	}
+
+	val, err = ctyjson.Unmarshal(js, block.ImpliedType())
+	if err != nil {
+		return val, err
+	}
+
+	return block.CoerceValue(val)
+}
+
+// StateValueFromInstanceState converts a terraform.InstanceState to a
+// cty.Value as described by the provided cty.Type, and maintains the resource
+// ID as the "id" attribute.
+func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) {
+	return is.AttrsAsObjectValue(ty)
+}
+
+// LegacyResourceSchema takes a *Resource and returns a deep copy with 0.12-specific
+// features removed. This is used by the shims to get a configschema that
+// directly matches the structure of the schema.Resource.
+func LegacyResourceSchema(r *Resource) *Resource {
+	if r == nil {
+		return nil
+	}
+	// start with a shallow copy
+	newResource := new(Resource)
+	*newResource = *r
+	newResource.Schema = map[string]*Schema{}
+
+	for k, s := range r.Schema {
+		newResource.Schema[k] = LegacySchema(s)
+	}
+
+	return newResource
+}
+
+// LegacySchema takes a *Schema and returns a deep copy with some 0.12-specific
+// features disabled. This is used by the shims to get a configschema that
+// better reflects the given schema.Resource, without any adjustments we
+// make when sending a schema to Terraform Core.
+func LegacySchema(s *Schema) *Schema {
+	if s == nil {
+		return nil
+	}
+	// start with a shallow copy
+	newSchema := new(Schema)
+	*newSchema = *s
+	newSchema.SkipCoreTypeCheck = false
+
+	switch e := newSchema.Elem.(type) {
+	case *Schema:
+		newSchema.Elem = LegacySchema(e)
+	case *Resource:
+		newSchema.Elem = LegacyResourceSchema(e)
+	}
+
+	return newSchema
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
index da754ac788..a367a1fd59 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
@@ -18,7 +18,7 @@ func TestResourceDataRaw(
 	}
 
 	sm := schemaMap(schema)
-	diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil)
+	diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil, true)
 	if err != nil {
 		t.Fatalf("err: %s", err)
 	}
diff --git a/vendor/github.com/hashicorp/terraform/helper/validation/validation.go b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go
index e9edcd3338..8c8ac54fca 100644
--- a/vendor/github.com/hashicorp/terraform/helper/validation/validation.go
+++ b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go
@@ -13,6 +13,39 @@ import (
 	"github.com/hashicorp/terraform/helper/structure"
 )
 
+// All returns a SchemaValidateFunc which tests if the provided value
+// passes all of the provided SchemaValidateFuncs.
+func All(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc {
+	return func(i interface{}, k string) ([]string, []error) {
+		var allErrors []error
+		var allWarnings []string
+		for _, validator := range validators {
+			validatorWarnings, validatorErrors := validator(i, k)
+			allWarnings = append(allWarnings, validatorWarnings...)
+			allErrors = append(allErrors, validatorErrors...)
+		}
+		return allWarnings, allErrors
+	}
+}
+
+// Any returns a SchemaValidateFunc which tests if the provided value
+// passes any of the provided SchemaValidateFuncs.
+func Any(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc {
+	return func(i interface{}, k string) ([]string, []error) {
+		var allErrors []error
+		var allWarnings []string
+		for _, validator := range validators {
+			validatorWarnings, validatorErrors := validator(i, k)
+			if len(validatorWarnings) == 0 && len(validatorErrors) == 0 {
+				return []string{}, []error{}
+			}
+			allWarnings = append(allWarnings, validatorWarnings...)
+			allErrors = append(allErrors, validatorErrors...)
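+			// This validator failed; keep its warnings and errors in case
+			// every other validator fails too.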
+		}
+		return allWarnings, allErrors
+	}
+}
+
 // IntBetween returns a SchemaValidateFunc which tests if the provided value
 // is of type int and is between min and max (inclusive)
 func IntBetween(min, max int) schema.SchemaValidateFunc {
@@ -70,6 +103,27 @@ func IntAtMost(max int) schema.SchemaValidateFunc {
 	}
 }
 
+// IntInSlice returns a SchemaValidateFunc which tests if the provided value
+// is of type int and matches the value of an element in the valid slice
+func IntInSlice(valid []int) schema.SchemaValidateFunc {
+	return func(i interface{}, k string) (s []string, es []error) {
+		v, ok := i.(int)
+		if !ok {
+			es = append(es, fmt.Errorf("expected type of %s to be an integer", k))
+			return
+		}
+
+		for _, validInt := range valid {
+			if v == validInt {
+				return
+			}
+		}
+
+		es = append(es, fmt.Errorf("expected %s to be one of %v, got %d", k, valid, v))
+		return
+	}
+}
+
 // StringInSlice returns a SchemaValidateFunc which tests if the provided value
 // is of type string and matches the value of an element in the valid slice
 // will test with in lower case if ignoreCase is true
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go
new file mode 100644
index 0000000000..a9b8f9883c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go
@@ -0,0 +1,123 @@
+package earlyconfig
+
+import (
+	"fmt"
+	"sort"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/terraform-config-inspect/tfconfig"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/moduledeps"
+	"github.com/hashicorp/terraform/plugin/discovery"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// A Config is a node in the tree of modules within a configuration.
+//
+// The module tree is constructed by following ModuleCall instances recursively
+// through the root module transitively into descendent modules.
+type Config struct {
+	// Root points to the Config for the root module within the same
+	// module tree as this module. If this module _is_ the root module then
+	// this is self-referential.
+	Root *Config
+
+	// Parent points to the Config for the module that directly calls
+	// this module. If this is the root module then this field is nil.
+	Parent *Config
+
+	// Path is a sequence of module logical names that traverse from the root
+	// module to this config. Path is empty for the root module.
+	//
+	// This should only be used to display paths to the end-user in rare cases
+	// where we are talking about the static module tree, before module calls
+	// have been resolved. In most cases, an addrs.ModuleInstance describing
+	// a node in the dynamic module tree is better, since it will then include
+	// any keys resulting from evaluating "count" and "for_each" arguments.
+	Path addrs.Module
+
+	// Children points to the Config for each of the direct child modules
+	// called from this module. The keys in this map match the keys in
+	// Module.ModuleCalls.
+	Children map[string]*Config
+
+	// Module points to the object describing the configuration for the
+	// various elements (variables, resources, etc) defined by this module.
+	Module *tfconfig.Module
+
+	// CallPos is the source position for the header of the module block that
+	// requested this module.
+	//
+	// This field is meaningless for the root module, where its contents are undefined.
+	CallPos tfconfig.SourcePos
+
+	// SourceAddr is the source address that the referenced module was requested
+	// from, as specified in configuration.
+	//
+	// This field is meaningless for the root module, where its contents are undefined.
+	SourceAddr string
+
+	// Version is the specific version that was selected for this module,
+	// based on version constraints given in configuration.
+	//
+	// This field is nil if the module was loaded from a non-registry source,
+	// since versions are not supported for other sources.
+	//
+	// This field is meaningless for the root module, where it will always
+	// be nil.
+	Version *version.Version
+}
+
+// ProviderDependencies returns the provider dependencies for the receiving
+// config, including all of its descendent modules.
+func (c *Config) ProviderDependencies() (*moduledeps.Module, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	var name string
+	if len(c.Path) > 0 {
+		name = c.Path[len(c.Path)-1]
+	}
+
+	ret := &moduledeps.Module{
+		Name: name,
+	}
+
+	providers := make(moduledeps.Providers)
+	for name, reqs := range c.Module.RequiredProviders {
+		inst := moduledeps.ProviderInstance(name)
+		var constraints version.Constraints
+		for _, reqStr := range reqs {
+			if reqStr != "" {
+				constraint, err := version.NewConstraint(reqStr)
+				if err != nil {
+					diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{
+						Severity: tfconfig.DiagError,
+						Summary:  "Invalid provider version constraint",
+						Detail:   fmt.Sprintf("Invalid version constraint %q for provider %s.", reqStr, name),
+					}))
+					continue
+				}
+				constraints = append(constraints, constraint...)
+			}
+		}
+		providers[inst] = moduledeps.ProviderDependency{
+			Constraints: discovery.NewConstraints(constraints),
+			Reason:      moduledeps.ProviderDependencyExplicit,
+		}
+	}
+	ret.Providers = providers
+
+	childNames := make([]string, 0, len(c.Children))
+	for name := range c.Children {
+		childNames = append(childNames, name)
+	}
+	sort.Strings(childNames)
+
+	for _, name := range childNames {
+		child, childDiags := c.Children[name].ProviderDependencies()
+		ret.Children = append(ret.Children, child)
+		diags = diags.Append(childDiags)
+	}
+
+	return ret, diags
+}
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go
new file mode 100644
index 0000000000..770d5dfbe3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go
@@ -0,0 +1,144 @@
+package earlyconfig
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/terraform-config-inspect/tfconfig"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// BuildConfig constructs a Config from a root module by loading all of its
+// descendent modules via the given ModuleWalker.
+func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	cfg := &Config{
+		Module: root,
+	}
+	cfg.Root = cfg // Root module is self-referential.
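+	// Recursively load and attach all of the descendent modules.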
+	cfg.Children, diags = buildChildModules(cfg, walker)
+	return cfg, diags
+}
+
+func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	ret := map[string]*Config{}
+	calls := parent.Module.ModuleCalls
+
+	// We'll sort the calls by their local names so that they'll appear in a
+	// predictable order in any logging that's produced during the walk.
+	callNames := make([]string, 0, len(calls))
+	for k := range calls {
+		callNames = append(callNames, k)
+	}
+	sort.Strings(callNames)
+
+	for _, callName := range callNames {
+		call := calls[callName]
+		path := make([]string, len(parent.Path)+1)
+		copy(path, parent.Path)
+		path[len(path)-1] = call.Name
+
+		var vc version.Constraints
+		if strings.TrimSpace(call.Version) != "" {
+			var err error
+			vc, err = version.NewConstraint(call.Version)
+			if err != nil {
+				diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{
+					Severity: tfconfig.DiagError,
+					Summary:  "Invalid version constraint",
+					Detail:   fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err),
+				}))
+				continue
+			}
+		}
+
+		req := ModuleRequest{
+			Name:               call.Name,
+			Path:               path,
+			SourceAddr:         call.Source,
+			VersionConstraints: vc,
+			Parent:             parent,
+			CallPos:            call.Pos,
+		}
+
+		mod, ver, modDiags := walker.LoadModule(&req)
+		diags = append(diags, modDiags...)
+		if mod == nil {
+			// nil can be returned if the source address was invalid and so
+			// nothing could be loaded whatsoever. LoadModule should've
+			// returned at least one error diagnostic in that case.
+			continue
+		}
+
+		child := &Config{
+			Parent:     parent,
+			Root:       parent.Root,
+			Path:       path,
+			Module:     mod,
+			CallPos:    call.Pos,
+			SourceAddr: call.Source,
+			Version:    ver,
+		}
+
+		child.Children, modDiags = buildChildModules(child, walker)
+		diags = diags.Append(modDiags)
+
+		ret[call.Name] = child
+	}
+
+	return ret, diags
+}
+
+// ModuleRequest is used as part of the ModuleWalker interface used with
+// function BuildConfig.
+type ModuleRequest struct {
+	// Name is the "logical name" of the module call within configuration.
+	// This is provided in case the name is used as part of a storage key
+	// for the module, but implementations must otherwise treat it as an
+	// opaque string. It is guaranteed to have already been validated as an
+	// HCL identifier and UTF-8 encoded.
+	Name string
+
+	// Path is a list of logical names that traverse from the root module to
+	// this module. This can be used, for example, to form a lookup key for
+	// each distinct module call in a configuration, allowing for multiple
+	// calls with the same name at different points in the tree.
+	Path addrs.Module
+
+	// SourceAddr is the source address string provided by the user in
+	// configuration.
+	SourceAddr string
+
+	// VersionConstraints is the version constraint applied to the module in
+	// configuration.
+	VersionConstraints version.Constraints
+
+	// Parent is the partially-constructed module tree node that the loaded
+	// module will be added to. Callers may refer to any field of this
+	// structure except Children, which is still under construction when
+	// ModuleRequest objects are created and thus has undefined content.
+	// The main reason this is provided is so that full module paths can
+	// be constructed for uniqueness.
+	Parent *Config
+
+	// CallPos is the source position for the header of the "module" block
+	// in configuration that prompted this request.
+ CallPos tfconfig.SourcePos +} + +// ModuleWalker is an interface used with BuildConfig. +type ModuleWalker interface { + LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. +type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) + +func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + return f(req) +} diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go new file mode 100644 index 0000000000..9b2fd7f71d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go @@ -0,0 +1,78 @@ +package earlyconfig + +import ( + "fmt" + + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/tfdiags" +) + +func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics { + ret := make(tfdiags.Diagnostics, len(diags)) + for i, diag := range diags { + ret[i] = wrapDiagnostic(diag) + } + return ret +} + +func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic { + return wrappedDiagnostic{ + d: diag, + } +} + +type wrappedDiagnostic struct { + d tfconfig.Diagnostic +} + +func (d wrappedDiagnostic) Severity() tfdiags.Severity { + switch d.d.Severity { + case tfconfig.DiagError: + return tfdiags.Error + case tfconfig.DiagWarning: + return tfdiags.Warning + default: + // Should never happen since there are no other severities + return 0 + } +} + +func (d wrappedDiagnostic) Description() tfdiags.Description { + // Since the inspect library doesn't produce precise source locations, + // we include the position information as part of the error message text. + // See the comment inside method "Source" for more information. + switch { + case d.d.Pos == nil: + return tfdiags.Description{ + Summary: d.d.Summary, + Detail: d.d.Detail, + } + case d.d.Detail != "": + return tfdiags.Description{ + Summary: d.d.Summary, + Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail), + } + default: + return tfdiags.Description{ + Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line), + } + } +} + +func (d wrappedDiagnostic) Source() tfdiags.Source { + // Since the inspect library is constrained by the lowest common denominator + // between legacy HCL and modern HCL, it only returns ranges at whole-line + // granularity, and that isn't sufficient to populate a tfdiags.Source + // and so we'll just omit ranges altogether and include the line number in + // the Description text. + // + // Callers that want to return nicer errors should consider reacting to + // earlyconfig errors by attempting a follow-up parse with the normal + // config loader, which can produce more precise source location + // information. 
+ return tfdiags.Source{} +} + +func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr { + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go new file mode 100644 index 0000000000..a9cf10f37c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go @@ -0,0 +1,20 @@ +// Package earlyconfig is a specialized alternative to the top-level "configs" +// package that does only shallow processing of configuration and is therefore +// able to be much more liberal than the full config loader in what it accepts. +// +// In particular, it can accept both current and legacy HCL syntax, and it +// ignores top-level blocks that it doesn't recognize. These two characteristics +// make this package ideal for dependency-checking use-cases so that we are +// more likely to be able to return an error message about an explicit +// incompatibility than to return a less-actionable message about a construct +// not being supported. +// +// However, its liberal approach also means it should be used sparingly. It +// exists primarily for "terraform init", so that it is able to detect +// incompatibilities more robustly when installing dependencies. For most +// other use-cases, use the "configs" and "configs/configload" packages. +// +// Package earlyconfig is a wrapper around the terraform-config-inspect +// codebase, adding to it just some helper functionality for Terraform's own +// use-cases. +package earlyconfig diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go new file mode 100644 index 0000000000..d2d628797a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go @@ -0,0 +1,13 @@ +package earlyconfig + +import ( + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/tfdiags" +) + +// LoadModule loads some top-level metadata for the module in the given +// directory. +func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) { + mod, diags := tfconfig.LoadModule(dir) + return mod, wrapDiagnostics(diags) +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go b/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go new file mode 100644 index 0000000000..7096ff74f8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go @@ -0,0 +1,125 @@ +package initwd + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +func copyDir(dst, src string) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path == src { + return nil + } + + if strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // we don't want to try and copy the same file over itself. + if eq, err := sameFile(path, dstPath); eq { + return nil + } else if err != nil { + return err + } + + // If we have a directory, make that subdirectory, then continue + // the walk. 
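+		// (Note that the check below also prevents descending into dst
+		// when dst is nested inside src.)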
+		if info.IsDir() {
+			if path == filepath.Join(src, dst) {
+				// dst is in src; don't walk it.
+				return nil
+			}
+
+			if err := os.MkdirAll(dstPath, 0755); err != nil {
+				return err
+			}
+
+			return nil
+		}
+
+		// If the current path is a symlink, recreate the symlink relative to
+		// the dst directory
+		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+			target, err := os.Readlink(path)
+			if err != nil {
+				return err
+			}
+
+			return os.Symlink(target, dstPath)
+		}
+
+		// If we have a file, copy the contents.
+		srcF, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		dstF, err := os.Create(dstPath)
+		if err != nil {
+			return err
+		}
+		defer dstF.Close()
+
+		if _, err := io.Copy(dstF, srcF); err != nil {
+			return err
+		}
+
+		// Chmod it
+		return os.Chmod(dstPath, info.Mode())
+	}
+
+	return filepath.Walk(src, walkFn)
+}
+
+// sameFile tries to determine if two paths refer to the same file.
+// If the paths don't match, we look up the inode on supported systems.
+func sameFile(a, b string) (bool, error) {
+	if a == b {
+		return true, nil
+	}
+
+	aIno, err := inode(a)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+
+	bIno, err := inode(b)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+
+	if aIno > 0 && aIno == bIno {
+		return true, nil
+	}
+
+	return false, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go b/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go
new file mode 100644
index 0000000000..b9d938dbb0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go
@@ -0,0 +1,7 @@
+// Package initwd contains various helper functions used by the "terraform init"
+// command to initialize a working directory.
+//
+// These functions may also be used from testing code to simulate the behaviors
+// of "terraform init" against test fixtures, but should not be used elsewhere
+// in the main code.
+package initwd
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go b/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go
new file mode 100644
index 0000000000..6b40d08d69
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go
@@ -0,0 +1,363 @@
+package initwd
+
+import (
+	"fmt"
+	"github.com/hashicorp/terraform/internal/earlyconfig"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/terraform-config-inspect/tfconfig"
+	"github.com/hashicorp/terraform/internal/modsdir"
+	"github.com/hashicorp/terraform/registry"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+const initFromModuleRootCallName = "root"
+const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "."
+
+// DirFromModule populates the given directory (which must exist and be
+// empty) with the contents of the module at the given source address.
+//
+// It does this by installing the given module and all of its descendent
+// modules in a temporary root directory and then copying the installed
+// files into suitable locations. As a consequence, any diagnostics it
+// generates will reveal the location of this temporary directory to the
+// user.
+//
+// This rather roundabout installation approach is taken to ensure that
+// installation proceeds in a manner identical to normal module installation.
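+//
+// A rough usage sketch (the argument values here are hypothetical):
+//
+//	diags := DirFromModule(".", ".terraform/modules",
+//		"git::https://example.com/org/mod.git", regClient, hooks)
+//	if diags.HasErrors() {
+//		// report diagnostics and abort
+//	}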
+// +// If the given source address specifies a sub-directory of the given +// package then only the sub-directory and its descendents will be copied +// into the given root directory, which will cause any relative module +// references using ../ from that module to be unresolvable. Error diagnostics +// are produced in that case, to prompt the user to rewrite the source strings +// to be absolute references to the original remote module. +func DirFromModule(rootDir, modulesDir, sourceAddr string, reg *registry.Client, hooks ModuleInstallHooks) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // The way this function works is pretty ugly, but we accept it because + // -from-module is a less important case than normal module installation + // and so it's better to keep this ugly complexity out here rather than + // adding even more complexity to the normal module installer. + + // The target directory must exist but be empty. + { + entries, err := ioutil.ReadDir(rootDir) + if err != nil { + if os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Target directory does not exist", + fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read target directory", + fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err), + )) + } + return diags + } + haveEntries := false + for _, entry := range entries { + if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" { + continue + } + haveEntries = true + } + if haveEntries { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't populate non-empty directory", + fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir), + )) + return diags + } + } + + instDir := filepath.Join(rootDir, ".terraform/init-from-module") + inst := NewModuleInstaller(instDir, reg) + log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr) + os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too + err := os.MkdirAll(instDir, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create temporary directory", + fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err), + )) + return diags + } + + instManifest := make(modsdir.Manifest) + retManifest := make(modsdir.Manifest) + + fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr) + fakePos := tfconfig.SourcePos{ + Filename: fakeFilename, + Line: 1, + } + + // -from-module allows relative paths but it's different than a normal + // module address where it'd be resolved relative to the module call + // (which is synthetic, here.) To address this, we'll just patch up any + // relative paths to be absolute paths before we run, ensuring we'll + // get the right result. This also, as an important side-effect, ensures + // that the result will be "downloaded" with go-getter (copied from the + // source location), rather than just recorded as a relative path. + { + maybePath := filepath.ToSlash(sourceAddr) + if maybePath == "." 
|| strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") { + if wd, err := os.Getwd(); err == nil { + sourceAddr = filepath.Join(wd, sourceAddr) + log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr) + } + } + } + + // Now we need to create an artificial root module that will seed our + // installation process. + fakeRootModule := &tfconfig.Module{ + ModuleCalls: map[string]*tfconfig.ModuleCall{ + initFromModuleRootCallName: { + Name: initFromModuleRootCallName, + Source: sourceAddr, + Pos: fakePos, + }, + }, + } + + // wrapHooks filters hook notifications to only include Download calls + // and to trim off the initFromModuleRootCallName prefix. We'll produce + // our own Install notifications directly below. + wrapHooks := installHooksInitDir{ + Wrapped: hooks, + } + getter := reusingGetter{} + _, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter) + diags = append(diags, instDiags...) + if instDiags.HasErrors() { + return diags + } + + // If all of that succeeded then we'll now migrate what was installed + // into the final directory structure. + err = os.MkdirAll(modulesDir, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create local modules directory", + fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err), + )) + return diags + } + + recordKeys := make([]string, 0, len(instManifest)) + for k := range instManifest { + recordKeys = append(recordKeys, k) + } + sort.Strings(recordKeys) + + for _, recordKey := range recordKeys { + record := instManifest[recordKey] + + if record.Key == initFromModuleRootCallName { + // We've found the module the user requested, which we must + // now copy into rootDir so it can be used directly. + log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir) + err := copyDir(rootDir, record.Dir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to copy root module", + fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err), + )) + continue + } + + // We'll try to load the newly-copied module here just so we can + // sniff for any module calls that ../ out of the root directory + // and must thus be rewritten to be absolute addresses again. + // For now we can't do this rewriting automatically, but we'll + // generate an error to help the user do it manually. + mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway + if mod != nil { + for _, mc := range mod.ModuleCalls { + if pathTraversesUp(mc.Source) { + packageAddr, givenSubdir := splitAddrSubdir(sourceAddr) + newSubdir := filepath.Join(givenSubdir, mc.Source) + if pathTraversesUp(newSubdir) { + // This should never happen in any reasonable + // configuration since this suggests a path that + // traverses up out of the package root. We'll just + // ignore this, since we'll fail soon enough anyway + // trying to resolve this path when this module is + // loaded. + continue + } + + var newAddr = packageAddr + if newSubdir != "" { + newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir)) + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Root module references parent directory", + fmt.Sprintf("The requested module %q refers to a module via its parent directory. 
To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr),
+						))
+						continue
+					}
+				}
+			}
+
+			retManifest[""] = modsdir.Record{
+				Key: "",
+				Dir: rootDir,
+			}
+			continue
+		}
+
+		if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) {
+			// Ignore the *real* root module, whose key is empty, since
+			// we're only interested in the module named "root" and its
+			// descendents.
+			continue
+		}
+
+		newKey := record.Key[len(initFromModuleRootKeyPrefix):]
+		instPath := filepath.Join(modulesDir, newKey)
+		tempPath := filepath.Join(instDir, record.Key)
+
+		// tempPath won't be present for a module that was installed from
+		// a relative path, so in that case we just record the installation
+		// directory and assume it was already copied into place as part
+		// of its parent.
+		if _, err := os.Stat(tempPath); err != nil {
+			if !os.IsNotExist(err) {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Failed to stat temporary module install directory",
+					fmt.Sprintf("Error from stat %s for module %s: %s.", tempPath, newKey, err),
+				))
+				continue
+			}
+
+			var parentKey string
+			if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 {
+				parentKey = newKey[:lastDot]
+			} else {
+				parentKey = "" // parent is the root module
+			}
+
+			parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey]
+			parentNew := retManifest[parentKey]
+
+			// We need to figure out which portion of our directory is the
+			// parent package path and which portion is the subdirectory
+			// under that.
+			baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir)
+			if err != nil {
+				// Should never happen, because we constructed both directories
+				// from the same base and so they must have a common prefix.
+				panic(err)
+			}
+
+			newDir := filepath.Join(parentNew.Dir, baseDirRel)
+			log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir)
+			newRecord := record // shallow copy
+			newRecord.Dir = newDir
+			newRecord.Key = newKey
+			retManifest[newKey] = newRecord
+			hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
+			continue
+		}
+
+		err = os.MkdirAll(instPath, os.ModePerm)
+		if err != nil {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Failed to create module install directory",
+				fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err),
+			))
+			continue
+		}
+
+		// We copy rather than "rename" here because renaming between directories
+		// can be tricky in edge-cases like network filesystems, etc.
+		log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath)
+		err := copyDir(instPath, tempPath)
+		if err != nil {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Failed to copy descendent module",
+				fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, instPath, err),
+			))
+			continue
+		}
+
+		subDir, err := filepath.Rel(tempPath, record.Dir)
+		if err != nil {
+			// Should never happen, because we constructed both directories
+			// from the same base and so they must have a common prefix.
+			panic(err)
+		}
+
+		newRecord := record // shallow copy
+		newRecord.Dir = filepath.Join(instPath, subDir)
+		newRecord.Key = newKey
+		retManifest[newKey] = newRecord
+		hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
+	}
+
+	err = retManifest.WriteSnapshotToDir(modulesDir)
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Failed to write module manifest",
+			fmt.Sprintf("Error writing module manifest: %s.", err),
+		))
+	}
+
+	if !diags.HasErrors() {
+		// Try to clean up our temporary directory, but don't worry if we don't
+		// succeed since it shouldn't hurt anything.
+		os.RemoveAll(instDir)
+	}
+
+	return diags
+}
+
+func pathTraversesUp(path string) bool {
+	return strings.HasPrefix(filepath.ToSlash(path), "../")
+}
+
+// installHooksInitDir is an adapter wrapper for an InstallHooks that
+// does some fakery to make downloads look like they are happening in their
+// final locations, rather than in the temporary loader we use.
+//
+// It also suppresses "Install" calls entirely, since InitDirFromModule
+// does its own installation steps after the initial installation pass
+// has completed.
+type installHooksInitDir struct {
+	Wrapped ModuleInstallHooks
+	ModuleInstallHooksImpl
+}
+
+func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) {
+	if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) {
+		// We won't announce the root module, since hook implementations
+		// don't expect to see that and the caller will usually have produced
+		// its own user-facing notification about what it's doing anyway.
+		return
+	}
+
+	trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):]
+	h.Wrapped.Download(trimAddr, packageAddr, version)
+}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go
new file mode 100644
index 0000000000..50e2572afd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go
@@ -0,0 +1,210 @@
+package initwd
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+
+	cleanhttp "github.com/hashicorp/go-cleanhttp"
+	getter "github.com/hashicorp/go-getter"
+	"github.com/hashicorp/terraform/registry/regsrc"
+)
+
+// We configure our own go-getter detector and getter sets here, because
+// the set of sources we support is part of Terraform's documentation and
+// so we don't want any new sources introduced in go-getter to sneak in here
+// and work even though they aren't documented. This also insulates us from
+// any meddling that might be done by other go-getter callers linked into our
+// executable.
+ +var goGetterDetectors = []getter.Detector{ + new(getter.GitHubDetector), + new(getter.BitBucketDetector), + new(getter.S3Detector), + new(getter.FileDetector), +} + +var goGetterNoDetectors = []getter.Detector{} + +var goGetterDecompressors = map[string]getter.Decompressor{ + "bz2": new(getter.Bzip2Decompressor), + "gz": new(getter.GzipDecompressor), + "xz": new(getter.XzDecompressor), + "zip": new(getter.ZipDecompressor), + + "tar.bz2": new(getter.TarBzip2Decompressor), + "tar.tbz2": new(getter.TarBzip2Decompressor), + + "tar.gz": new(getter.TarGzipDecompressor), + "tgz": new(getter.TarGzipDecompressor), + + "tar.xz": new(getter.TarXzDecompressor), + "txz": new(getter.TarXzDecompressor), +} + +var goGetterGetters = map[string]getter.Getter{ + "file": new(getter.FileGetter), + "git": new(getter.GitGetter), + "hg": new(getter.HgGetter), + "s3": new(getter.S3Getter), + "http": getterHTTPGetter, + "https": getterHTTPGetter, +} + +var getterHTTPClient = cleanhttp.DefaultClient() + +var getterHTTPGetter = &getter.HttpGetter{ + Client: getterHTTPClient, + Netrc: true, +} + +// A reusingGetter is a helper for the module installer that remembers +// the final resolved addresses of all of the sources it has already been +// asked to install, and will copy from a prior installation directory if +// it has the same resolved source address. +// +// The keys in a reusingGetter are resolved and trimmed source addresses +// (with a scheme always present, and without any "subdir" component), +// and the values are the paths where each source was previously installed. +type reusingGetter map[string]string + +// getWithGoGetter retrieves the package referenced in the given address +// into the installation path and then returns the full path to any subdir +// indicated in the address. +// +// The errors returned by this function are those surfaced by the underlying +// go-getter library, which have very inconsistent quality as +// end-user-actionable error messages. At this time we do not have any +// reasonable way to improve these error messages at this layer because +// the underlying errors are not separately recognizable. 
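+//
+// A rough usage sketch (the addresses and paths here are hypothetical):
+//
+//	g := reusingGetter{}
+//	dir, err := g.getWithGoGetter(instPath, "github.com/example/mod//subdir")
+//	// a later call with the same resolved address copies the prior
+//	// install rather than downloading again.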
+func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) { + packageAddr, subDir := splitAddrSubdir(addr) + + log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath) + + realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors) + if err != nil { + return "", err + } + + if isMaybeRelativeLocalPath(realAddr) { + return "", &MaybeRelativePathErr{addr} + } + + var realSubDir string + realAddr, realSubDir = splitAddrSubdir(realAddr) + if realSubDir != "" { + subDir = filepath.Join(realSubDir, subDir) + } + + if realAddr != packageAddr { + log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr) + } + + if prevDir, exists := g[realAddr]; exists { + log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath) + err := os.Mkdir(instPath, os.ModePerm) + if err != nil { + return "", fmt.Errorf("failed to create directory %s: %s", instPath, err) + } + err = copyDir(instPath, prevDir) + if err != nil { + return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) + } + } else { + log.Printf("[TRACE] fetching %q to %q", realAddr, instPath) + client := getter.Client{ + Src: realAddr, + Dst: instPath, + Pwd: instPath, + + Mode: getter.ClientModeDir, + + Detectors: goGetterNoDetectors, // we already did detection above + Decompressors: goGetterDecompressors, + Getters: goGetterGetters, + } + err = client.Get() + if err != nil { + return "", err + } + // Remember where we installed this so we might reuse this directory + // on subsequent calls to avoid re-downloading. + g[realAddr] = instPath + } + + // Our subDir string can contain wildcards until this point, so that + // e.g. a subDir of * can expand to one top-level directory in a .tar.gz + // archive. Now that we've expanded the archive successfully we must + // resolve that into a concrete path. + var finalDir string + if subDir != "" { + finalDir, err = getter.SubdirGlob(instPath, subDir) + log.Printf("[TRACE] expanded %q to %q", subDir, finalDir) + if err != nil { + return "", err + } + } else { + finalDir = instPath + } + + // If we got this far then we have apparently succeeded in downloading + // the requested object! + return filepath.Clean(finalDir), nil +} + +// splitAddrSubdir splits the given address (which is assumed to be a +// registry address or go-getter-style address) into a package portion +// and a sub-directory portion. +// +// The package portion defines what should be downloaded and then the +// sub-directory portion, if present, specifies a sub-directory within +// the downloaded object (an archive, VCS repository, etc) that contains +// the module's configuration files. +// +// The subDir portion will be returned as empty if no subdir separator +// ("//") is present in the address. 
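+//
+// For example (illustrative only):
+//
+//	packageAddr, subDir := splitAddrSubdir("github.com/example/repo//modules/vpc")
+//	// packageAddr == "github.com/example/repo", subDir == "modules/vpc"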
+func splitAddrSubdir(addr string) (packageAddr, subDir string) { + return getter.SourceDirSubdir(addr) +} + +var localSourcePrefixes = []string{ + "./", + "../", + ".\\", + "..\\", +} + +func isLocalSourceAddr(addr string) bool { + for _, prefix := range localSourcePrefixes { + if strings.HasPrefix(addr, prefix) { + return true + } + } + return false +} + +func isRegistrySourceAddr(addr string) bool { + _, err := regsrc.ParseModuleSource(addr) + return err == nil +} + +type MaybeRelativePathErr struct { + Addr string +} + +func (e *MaybeRelativePathErr) Error() string { + return fmt.Sprintf("Terraform cannot determine the module source for %s", e.Addr) +} + +func isMaybeRelativeLocalPath(addr string) bool { + if strings.HasPrefix(addr, "file://") { + _, err := os.Stat(addr[7:]) + if err != nil { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go new file mode 100644 index 0000000000..1150b093cc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go @@ -0,0 +1,21 @@ +// +build linux darwin openbsd netbsd solaris dragonfly + +package initwd + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return st.Ino, nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go new file mode 100644 index 0000000000..30532f54ac --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go @@ -0,0 +1,21 @@ +// +build freebsd + +package initwd + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return uint64(st.Ino), nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go new file mode 100644 index 0000000000..3ed58e4bf9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package initwd + +// no syscall.Stat_t on windows, return 0 for inodes +func inode(path string) (uint64, error) { + return 0, nil +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go b/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go new file mode 100644 index 0000000000..6f77dcd84b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go @@ -0,0 +1,56 @@ +package initwd + +import ( + "fmt" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/internal/earlyconfig" + "github.com/hashicorp/terraform/internal/modsdir" + "github.com/hashicorp/terraform/tfdiags" +) + +// LoadConfig loads a full configuration tree that has previously had all of +// its dependent modules installed to the given modulesDir using a +// ModuleInstaller. 
+//
+// This uses the early configuration loader and thus only reads top-level
+// metadata from the modules in the configuration. Most callers should use
+// the configs/configload package to fully load a configuration.
+func LoadConfig(rootDir, modulesDir string) (*earlyconfig.Config, tfdiags.Diagnostics) {
+	rootMod, diags := earlyconfig.LoadModule(rootDir)
+	if rootMod == nil {
+		return nil, diags
+	}
+
+	manifest, err := modsdir.ReadManifestSnapshotForDir(modulesDir)
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Failed to read module manifest",
+			fmt.Sprintf("Terraform failed to read its manifest of locally-cached modules: %s.", err),
+		))
+		return nil, diags
+	}
+
+	return earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc(
+		func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
+			var diags tfdiags.Diagnostics
+
+			key := manifest.ModuleKey(req.Path)
+			record, exists := manifest[key]
+			if !exists {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Module not installed",
+					fmt.Sprintf("Module %s is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", req.Path.String()),
+				))
+				return nil, nil, diags
+			}
+
+			mod, mDiags := earlyconfig.LoadModule(record.Dir)
+			diags = diags.Append(mDiags)
+			return mod, record.Version, diags
+		},
+	))
+}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go
new file mode 100644
index 0000000000..543405b5b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go
@@ -0,0 +1,538 @@
+package initwd
+
+import (
+	"fmt"
+	"github.com/hashicorp/terraform/registry"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/terraform-config-inspect/tfconfig"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/internal/earlyconfig"
+	"github.com/hashicorp/terraform/internal/modsdir"
+	"github.com/hashicorp/terraform/registry/regsrc"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+type ModuleInstaller struct {
+	modsDir string
+	reg     *registry.Client
+}
+
+func NewModuleInstaller(modsDir string, reg *registry.Client) *ModuleInstaller {
+	return &ModuleInstaller{
+		modsDir: modsDir,
+		reg:     reg,
+	}
+}
+
+// InstallModules analyses the root module in the given directory and installs
+// all of its direct and transitive dependencies into the given modules
+// directory, which must already exist.
+//
+// Since InstallModules makes possibly-time-consuming calls to remote services,
+// a hook interface is supported to allow the caller to be notified when
+// each module is installed and, for remote modules, when downloading begins.
+// InstallModules guarantees that two hook calls will not happen concurrently
+// but it does not guarantee any particular ordering of hook calls. This
+// mechanism is for UI feedback only and does not give the caller any control
+// over the process.
+//
+// If modules are already installed in the target directory, they will be
+// skipped unless their source address or version have changed or unless
+// the upgrade flag is set.
+//
+// InstallModules never deletes any directory, except in the case where it
+// needs to replace a directory that is already present with a newly-extracted
+// package.
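+//
+// A rough usage sketch (the values here are hypothetical):
+//
+//	inst := NewModuleInstaller(".terraform/modules", regClient)
+//	cfg, diags := inst.InstallModules(".", false, hooks)
+//	if diags.HasErrors() {
+//		// report diagnostics
+//	}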
+// +// If the returned diagnostics contains errors then the module installation +// may have wholly or partially completed. Modules must be loaded in order +// to find their dependencies, so this function does many of the same checks +// as LoadConfig as a side-effect. +// +// If successful (the returned diagnostics contains no errors) then the +// first return value is the early configuration tree that was constructed by +// the installation process. +func (i *ModuleInstaller) InstallModules(rootDir string, upgrade bool, hooks ModuleInstallHooks) (*earlyconfig.Config, tfdiags.Diagnostics) { + log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir) + + rootMod, diags := earlyconfig.LoadModule(rootDir) + if rootMod == nil { + return nil, diags + } + + manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read modules manifest file", + fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err), + )) + return nil, diags + } + + getter := reusingGetter{} + cfg, instDiags := i.installDescendentModules(rootMod, rootDir, manifest, upgrade, hooks, getter) + diags = append(diags, instDiags...) + + return cfg, diags +} + +func (i *ModuleInstaller) installDescendentModules(rootMod *tfconfig.Module, rootDir string, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, getter reusingGetter) (*earlyconfig.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if hooks == nil { + // Use our no-op implementation as a placeholder + hooks = ModuleInstallHooksImpl{} + } + + // Create a manifest record for the root module. This will be used if + // there are any relative-pathed modules in the root. + manifest[""] = modsdir.Record{ + Key: "", + Dir: rootDir, + } + + cfg, cDiags := earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc( + func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + + key := manifest.ModuleKey(req.Path) + instPath := i.packageInstallPath(req.Path) + + log.Printf("[DEBUG] Module installer: begin %s", key) + + // First we'll check if we need to upgrade/replace an existing + // installed module, and delete it out of the way if so. + replace := upgrade + if !replace { + record, recorded := manifest[key] + switch { + case !recorded: + log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key) + replace = true + case record.SourceAddr != req.SourceAddr: + log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr) + replace = true + case record.Version != nil && !req.VersionConstraints.Check(record.Version): + log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraints) + replace = true + } + } + + // If we _are_ planning to replace this module, then we'll remove + // it now so our installation code below won't conflict with any + // existing remnants. + if replace { + if _, recorded := manifest[key]; recorded { + log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key) + } + delete(manifest, key) + // Deleting a module invalidates all of its descendent modules too. + keyPrefix := key + "." 
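+				// e.g. discarding "a.b" must also discard "a.b.c",
+				// "a.b.c.d", and so on down the tree.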
+				for subKey := range manifest {
+					if strings.HasPrefix(subKey, keyPrefix) {
+						if _, recorded := manifest[subKey]; recorded {
+							log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey)
+						}
+						delete(manifest, subKey)
+					}
+				}
+			}
+
+			record, recorded := manifest[key]
+			if !recorded {
+				// Clean up any stale cache directory that might be present.
+				// If this is a local (relative) source then the dir will
+				// not exist, but we'll ignore that.
+				log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key)
+				err := os.RemoveAll(instPath)
+				if err != nil && !os.IsNotExist(err) {
+					log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err)
+					diags = diags.Append(tfdiags.Sourceless(
+						tfdiags.Error,
+						"Failed to remove local module cache",
+						fmt.Sprintf(
+							"Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s",
+							instPath, err,
+						),
+					))
+					return nil, nil, diags
+				}
+			} else {
+				// If this module is already recorded and its root directory
+				// exists then we will just load what's already there and
+				// keep our existing record.
+				info, err := os.Stat(record.Dir)
+				if err == nil && info.IsDir() {
+					mod, mDiags := earlyconfig.LoadModule(record.Dir)
+					diags = diags.Append(mDiags)
+
+					log.Printf("[TRACE] ModuleInstaller: %s %s already installed in %s", key, record.Version, record.Dir)
+					return mod, record.Version, diags
+				}
+			}
+
+			// If we get down here then it's finally time to actually install
+			// the module. There are some variants to this process depending
+			// on what type of module source address we have.
+			switch {
+
+			case isLocalSourceAddr(req.SourceAddr):
+				log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, req.SourceAddr)
+				mod, mDiags := i.installLocalModule(req, key, manifest, hooks)
+				diags = append(diags, mDiags...)
+				return mod, nil, diags
+
+			case isRegistrySourceAddr(req.SourceAddr):
+				addr, err := regsrc.ParseModuleSource(req.SourceAddr)
+				if err != nil {
+					// Should never happen because isRegistrySourceAddr already validated
+					panic(err)
+				}
+				log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr)
+
+				mod, v, mDiags := i.installRegistryModule(req, key, instPath, addr, manifest, hooks, getter)
+				diags = append(diags, mDiags...)
+				return mod, v, diags
+
+			default:
+				log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, req.SourceAddr)
+
+				mod, mDiags := i.installGoGetterModule(req, key, instPath, manifest, hooks, getter)
+				diags = append(diags, mDiags...)
+				return mod, nil, diags
+			}
+
+		},
+	))
+	diags = append(diags, cDiags...)
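+
+	// Now that the walk is complete, persist the updated manifest so that
+	// later configuration loads (and re-runs of "terraform init") can find
+	// the modules recorded above.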
+ + err := manifest.WriteSnapshotToDir(i.modsDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to update module manifest", + fmt.Sprintf("Unable to write the module manifest file: %s", err), + )) + } + + return cfg, diags +} + +func (i *ModuleInstaller) installLocalModule(req *earlyconfig.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*tfconfig.Module, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + parentKey := manifest.ModuleKey(req.Parent.Path) + parentRecord, recorded := manifest[parentKey] + if !recorded { + // This is indicative of a bug rather than a user-actionable error + panic(fmt.Errorf("missing manifest record for parent module %s", parentKey)) + } + + if len(req.VersionConstraints) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid version constraint", + fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } + + // For local sources we don't actually need to modify the + // filesystem at all because the parent already wrote + // the files we need, and so we just load up what's already here. + newDir := filepath.Join(parentRecord.Dir, req.SourceAddr) + log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir) + mod, mDiags := earlyconfig.LoadModule(newDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } else { + diags = diags.Append(mDiags) + } + + // Note the local location in our manifest. + manifest[key] = modsdir.Record{ + Key: key, + Dir: newDir, + SourceAddr: req.SourceAddr, + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir) + hooks.Install(key, nil, newDir) + + return mod, diags +} + +func (i *ModuleInstaller) installRegistryModule(req *earlyconfig.ModuleRequest, key string, instPath string, addr *regsrc.Module, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + hostname, err := addr.SvcHost() + if err != nil { + // If it looks like the user was trying to use punycode then we'll generate + // a specialized error for that case. We require the unicode form of + // hostname so that hostnames are always human-readable in configuration + // and punycode can't be used to hide a malicious module hostname. + if strings.HasPrefix(addr.RawHost.Raw, "xn--") { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid module registry hostname", + fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not an acceptable hostname. 
Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid module registry hostname", + fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not a valid hostname.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } + return nil, nil, diags + } + + reg := i.reg + + log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname) + resp, err := reg.ModuleVersions(addr) + if err != nil { + if registry.IsModuleNotFound(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module not found", + fmt.Sprintf("Module %q (from %s:%d) cannot be found in the module registry at %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error accessing remote module registry", + fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname, err), + )) + } + return nil, nil, diags + } + + // The response might contain information about dependencies to allow us + // to potentially optimize future requests, but we don't currently do that + // and so for now we'll just take the first item which is guaranteed to + // be the address we requested. + if len(resp.Modules) < 1 { + // Should never happen, but since this is a remote service that may + // be implemented by third-parties we will handle it gracefully. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid response from remote module registry", + fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + return nil, nil, diags + } + + modMeta := resp.Modules[0] + + var latestMatch *version.Version + var latestVersion *version.Version + for _, mv := range modMeta.Versions { + v, err := version.NewVersion(mv.Version) + if err != nil { + // Should never happen if the registry server is compliant with + // the protocol, but we'll warn if not to assist someone who + // might be developing a module registry server. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Invalid response from remote module registry", + fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which Terraform ignored.", hostname, mv.Version, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + continue + } + + // If we've found a pre-release version then we'll ignore it unless + // it was exactly requested. 
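+		// (For example, 2.0.0-beta1 would be skipped here unless the module
+		// call pins exactly version = "2.0.0-beta1".)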
+ if v.Prerelease() != "" && req.VersionConstraints.String() != v.String() { + log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v) + continue + } + + if latestVersion == nil || v.GreaterThan(latestVersion) { + latestVersion = v + } + + if req.VersionConstraints.Check(v) { + if latestMatch == nil || v.GreaterThan(latestMatch) { + latestMatch = v + } + } + } + + if latestVersion == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module has no versions", + fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallPos.Filename, req.CallPos.Line, hostname), + )) + return nil, nil, diags + } + + if latestMatch == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unresolvable module version constraint", + fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallPos.Filename, req.CallPos.Line, latestVersion), + )) + return nil, nil, diags + } + + // Report up to the caller that we're about to start downloading. + packageAddr, _ := splitAddrSubdir(req.SourceAddr) + hooks.Download(key, packageAddr, latestMatch) + + // If we manage to get down here then we've found a suitable version to + // install, so we need to ask the registry where we should download it from. + // The response to this is a go-getter-style address string. + dlAddr, err := reg.ModuleLocation(addr, latestMatch.String()) + if err != nil { + log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid response from remote module registry", + fmt.Sprintf("The remote registry at %s failed to return a download URL for %s %s.", hostname, addr, latestMatch), + )) + return nil, nil, diags + } + + log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, addr, latestMatch, dlAddr) + + modDir, err := getter.getWithGoGetter(instPath, dlAddr) + if err != nil { + // Errors returned by go-getter have very inconsistent quality as + // end-user error messages, but for now we're accepting that because + // we have no way to recognize any specific errors to improve them + // and masking the error entirely would hide valuable diagnostic + // information from the user. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to download module", + fmt.Sprintf("Error attempting to download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, dlAddr, err), + )) + return nil, nil, diags + } + + log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr, modDir) + + if addr.RawSubmodule != "" { + // Append the user's requested subdirectory to any subdirectory that + // was implied by any of the nested layers we expanded within go-getter. + modDir = filepath.Join(modDir, addr.RawSubmodule) + } + + log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir) + + // Finally we are ready to try actually loading the module. + mod, mDiags := earlyconfig.LoadModule(modDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. For registry modules this actually + // indicates a bug in the code above, since it's not the + // user's responsibility to create the directory in this case. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir), + )) + } else { + diags = append(diags, mDiags...) + } + + // Note the local location in our manifest. + manifest[key] = modsdir.Record{ + Key: key, + Version: latestMatch, + Dir: modDir, + SourceAddr: req.SourceAddr, + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) + hooks.Install(key, latestMatch, modDir) + + return mod, latestMatch, diags +} + +func (i *ModuleInstaller) installGoGetterModule(req *earlyconfig.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Report up to the caller that we're about to start downloading. + packageAddr, _ := splitAddrSubdir(req.SourceAddr) + hooks.Download(key, packageAddr, nil) + + modDir, err := getter.getWithGoGetter(instPath, req.SourceAddr) + if err != nil { + if err, ok := err.(*MaybeRelativePathErr); ok { + log.Printf( + "[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../", + req.SourceAddr, + ) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module not found", + fmt.Sprintf( + "The module address %q could not be resolved.\n\n"+ + "If you intended this as a path relative to the current "+ + "module, use \"./%s\" instead. The \"./\" prefix "+ + "indicates that the address is a relative filesystem path.", + req.SourceAddr, req.SourceAddr, + ), + )) + } else { + // Errors returned by go-getter have very inconsistent quality as + // end-user error messages, but for now we're accepting that because + // we have no way to recognize any specific errors to improve them + // and masking the error entirely would hide valuable diagnostic + // information from the user. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to download module", + fmt.Sprintf("Error attempting to download module %q (%s:%d) source code from %q: %s", req.Name, req.CallPos.Filename, req.CallPos.Line, packageAddr, err), + )) + } + return nil, diags + + } + + log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, req.SourceAddr, modDir) + + mod, mDiags := earlyconfig.LoadModule(modDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. For go-getter modules this actually + // indicates a bug in the code above, since it's not the + // user's responsibility to create the directory in this case. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir), + )) + } else { + diags = append(diags, mDiags...) + } + + // Note the local location in our manifest. 
+	manifest[key] = modsdir.Record{
+		Key:        key,
+		Dir:        modDir,
+		SourceAddr: req.SourceAddr,
+	}
+	log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
+	hooks.Install(key, nil, modDir)
+
+	return mod, diags
+}
+
+func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string {
+	return filepath.Join(i.modsDir, strings.Join(modulePath, "."))
+}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go
new file mode 100644
index 0000000000..817a6dc832
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go
@@ -0,0 +1,36 @@
+package initwd
+
+import (
+	version "github.com/hashicorp/go-version"
+)
+
+// ModuleInstallHooks is an interface used to provide notifications about the
+// installation process being orchestrated by InstallModules.
+//
+// This interface may have new methods added in future, so implementers should
+// embed ModuleInstallHooksImpl to get no-op implementations of any
+// unimplemented methods.
+type ModuleInstallHooks interface {
+	// Download is called for modules that are retrieved from a remote source
+	// before that download begins, to allow a caller to give feedback
+	// on progress through a possibly-long sequence of downloads.
+	Download(moduleAddr, packageAddr string, version *version.Version)
+
+	// Install is called for each module that is installed, even if it did
+	// not need to be downloaded from a remote source.
+	Install(moduleAddr string, version *version.Version, localPath string)
+}
+
+// ModuleInstallHooksImpl is a do-nothing implementation of ModuleInstallHooks
+// that can be embedded in another implementation struct to allow only partial
+// implementation of the interface.
+type ModuleInstallHooksImpl struct {
+}
+
+func (h ModuleInstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) {
+}
+
+func (h ModuleInstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) {
+}
+
+var _ ModuleInstallHooks = ModuleInstallHooksImpl{}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go b/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go
new file mode 100644
index 0000000000..8cef80a35b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go
@@ -0,0 +1,73 @@
+package initwd
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/configs/configload"
+	"github.com/hashicorp/terraform/registry"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// LoadConfigForTests is a convenience wrapper around configload.NewLoaderForTests,
+// ModuleInstaller.InstallModules and configload.Loader.LoadConfig that allows
+// a test configuration to be loaded in a single step.
+//
+// If module installation fails, t.Fatal (or similar) is called to halt
+// execution of the test, under the assumption that installation failures are
+// not expected. If installation failures _are_ expected then use
+// NewLoaderForTests and work with the loader object directly. If module
+// installation succeeds but generates warnings, these warnings are discarded.
+//
+// If installation succeeds but errors are detected during loading then a
+// possibly-incomplete config is returned along with error diagnostics. The
+// test run is not aborted in this case, so that the caller can make assertions
+// against the returned diagnostics.
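+//
+// A typical call from a test function looks like this (an illustrative
+// sketch; the fixture path is a placeholder):
+//
+//     config, _, cleanup, diags := LoadConfigForTests(t, "testdata/example-config")
+//     defer cleanup()
+//     if diags.HasErrors() {
+//         t.Fatal(diags.Err())
+//     }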
+//
+// As with NewLoaderForTests, a cleanup function is returned which must be
+// called before the test completes in order to remove the temporary
+// modules directory.
+func LoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func(), tfdiags.Diagnostics) {
+	t.Helper()
+
+	var diags tfdiags.Diagnostics
+
+	loader, cleanup := configload.NewLoaderForTests(t)
+	inst := NewModuleInstaller(loader.ModulesDir(), registry.NewClient(nil, nil))
+
+	_, moreDiags := inst.InstallModules(rootDir, true, ModuleInstallHooksImpl{})
+	diags = diags.Append(moreDiags)
+	if diags.HasErrors() {
+		cleanup()
+		t.Fatal(diags.Err())
+		return nil, nil, func() {}, diags
+	}
+
+	// Since the module installer has modified the module manifest on disk,
+	// we need to refresh the cache of it in the loader.
+	if err := loader.RefreshModules(); err != nil {
+		t.Fatalf("failed to refresh modules after installation: %s", err)
+	}
+
+	config, hclDiags := loader.LoadConfig(rootDir)
+	diags = diags.Append(hclDiags)
+	return config, loader, cleanup, diags
+}
+
+// MustLoadConfigForTests is a variant of LoadConfigForTests which calls
+// t.Fatal (or similar) if there are any errors during loading, and thus
+// does not return diagnostics at all.
+//
+// This is useful for concisely writing tests that don't expect errors at
+// all. For tests that expect errors and need to assert against them, use
+// LoadConfigForTests instead.
+func MustLoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func()) {
+	t.Helper()
+
+	config, loader, cleanup, diags := LoadConfigForTests(t, rootDir)
+	if diags.HasErrors() {
+		cleanup()
+		t.Fatal(diags.Err())
+	}
+	return config, loader, cleanup
+}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go b/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go
new file mode 100644
index 0000000000..104840b931
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go
@@ -0,0 +1,83 @@
+package initwd
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/terraform/internal/earlyconfig"
+	"github.com/hashicorp/terraform/tfdiags"
+	tfversion "github.com/hashicorp/terraform/version"
+)
+
+// CheckCoreVersionRequirements visits each of the modules in the given
+// early configuration tree and verifies that any given Core version
+// constraints match the version of Terraform Core that is being used.
+//
+// The returned diagnostics will contain errors if any constraints do not
+// match. The returned diagnostics might also contain warnings, which should
+// be displayed to the user.
+func CheckCoreVersionRequirements(earlyConfig *earlyconfig.Config) tfdiags.Diagnostics {
+	if earlyConfig == nil {
+		return nil
+	}
+
+	var diags tfdiags.Diagnostics
+	module := earlyConfig.Module
+
+	var constraints version.Constraints
+	for _, constraintStr := range module.RequiredCore {
+		constraint, err := version.NewConstraint(constraintStr)
+		if err != nil {
+			// Unfortunately the early config parser doesn't preserve a source
+			// location for this, so we're unable to indicate a specific
+			// location where this constraint came from, but we can at least
+			// say which module set it.
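+			// (Constraint strings here come from "required_version" settings,
+			// e.g. ">= 0.12.0" or "~> 0.11.14".)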
+			switch {
+			case len(earlyConfig.Path) == 0:
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Invalid Terraform core version constraint",
+					fmt.Sprintf("Invalid core version constraint %q in the root module.", constraintStr),
+				))
+			default:
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Invalid Terraform core version constraint",
+					fmt.Sprintf("Invalid core version constraint %q in %s.", constraintStr, earlyConfig.Path),
+				))
+			}
+			continue
+		}
+		constraints = append(constraints, constraint...)
+	}
+
+	if !constraints.Check(tfversion.SemVer) {
+		switch {
+		case len(earlyConfig.Path) == 0:
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Unsupported Terraform Core version",
+				fmt.Sprintf(
+					"This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the root module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
+					tfversion.String(),
+				),
+			))
+		default:
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Unsupported Terraform Core version",
+				fmt.Sprintf(
+					"Module %s (from %q) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
+					earlyConfig.Path, earlyConfig.SourceAddr, tfversion.String(),
+				),
+			))
+		}
+	}
+
+	for _, c := range earlyConfig.Children {
+		childDiags := CheckCoreVersionRequirements(c)
+		diags = diags.Append(childDiags)
+	}
+
+	return diags
+}
diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go
new file mode 100644
index 0000000000..0d7d664fc1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go
@@ -0,0 +1,3 @@
+// Package modsdir is an internal package containing the model types used to
+// represent the manifest of modules in a local modules cache directory.
+package modsdir
diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go
new file mode 100644
index 0000000000..36f6c033f8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go
@@ -0,0 +1,142 @@
+package modsdir
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+
+	version "github.com/hashicorp/go-version"
+
+	"github.com/hashicorp/terraform/addrs"
+)
+
+// Record represents some metadata about an installed module, as part
+// of a ModuleManifest.
+type Record struct {
+	// Key is a unique identifier for this particular module, based on its
+	// position within the static module tree.
+	Key string `json:"Key"`
+
+	// SourceAddr is the source address given for this module in configuration.
+	// This is used only to detect if the source was changed in configuration
+	// since the module was last installed, which means that the installer
+	// must re-install it.
+	SourceAddr string `json:"Source"`
+
+	// Version is the exact version of the module, which results from parsing
+	// VersionStr. nil for un-versioned modules.
+	Version *version.Version `json:"-"`
+
+	// VersionStr is the version specifier string. This is used only for
+	// serialization in snapshots and should not be accessed or updated
+	// by any other codepaths; use "Version" instead.
+	VersionStr string `json:"Version,omitempty"`
+
+	// Dir is the path to the local directory where the module is installed.
+	Dir string `json:"Dir"`
+}
+
+// Manifest is a map used to keep track of the filesystem locations
+// and other metadata about installed modules.
+//
+// The configuration loader refers to this, while the module installer updates
+// it to reflect any changes to the installed modules.
+type Manifest map[string]Record
+
+func (m Manifest) ModuleKey(path addrs.Module) string {
+	return path.String()
+}
+
+// manifestSnapshotFile is an internal struct used only to assist in our JSON
+// serialization of manifest snapshots. It should not be used for any other
+// purpose.
+type manifestSnapshotFile struct {
+	Records []Record `json:"Modules"`
+}
+
+func ReadManifestSnapshot(r io.Reader) (Manifest, error) {
+	src, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(src) == 0 {
+		// This should never happen, but we'll tolerate it as if it were
+		// a valid empty JSON object.
+		return make(Manifest), nil
+	}
+
+	var read manifestSnapshotFile
+	err = json.Unmarshal(src, &read)
+	if err != nil {
+		return nil, err
+	}
+
+	new := make(Manifest)
+	for _, record := range read.Records {
+		if record.VersionStr != "" {
+			record.Version, err = version.NewVersion(record.VersionStr)
+			if err != nil {
+				return nil, fmt.Errorf("invalid version %q for %s: %s", record.VersionStr, record.Key, err)
+			}
+		}
+		if _, exists := new[record.Key]; exists {
+			// This should never happen in any valid file, so we'll catch it
+			// and report it to avoid confusing/undefined behavior if the
+			// snapshot file was edited incorrectly outside of Terraform.
+			return nil, fmt.Errorf("snapshot file contains two records for path %s", record.Key)
+		}
+		new[record.Key] = record
+	}
+	return new, nil
+}
+
+func ReadManifestSnapshotForDir(dir string) (Manifest, error) {
+	fn := filepath.Join(dir, ManifestSnapshotFilename)
+	r, err := os.Open(fn)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return make(Manifest), nil // missing file is okay and treated as empty
+		}
+		return nil, err
+	}
+	return ReadManifestSnapshot(r)
+}
+
+func (m Manifest) WriteSnapshot(w io.Writer) error {
+	var write manifestSnapshotFile
+
+	for _, record := range m {
+		// Make sure VersionStr is in sync with Version, since we encourage
+		// callers to manipulate Version and ignore VersionStr.
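+		// (A serialized record is shaped like
+		// {"Key":"foo","Source":"./foo","Version":"1.0.0","Dir":"foo"},
+		// with purely illustrative values shown here.)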
+		if record.Version != nil {
+			record.VersionStr = record.Version.String()
+		} else {
+			record.VersionStr = ""
+		}
+		write.Records = append(write.Records, record)
+	}
+
+	src, err := json.Marshal(write)
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write(src)
+	return err
+}
+
+func (m Manifest) WriteSnapshotToDir(dir string) error {
+	fn := filepath.Join(dir, ManifestSnapshotFilename)
+	log.Printf("[TRACE] modsdir: writing modules manifest to %s", fn)
+	w, err := os.Create(fn)
+	if err != nil {
+		return err
+	}
+	defer w.Close()
+	return m.WriteSnapshot(w)
+}
diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go
new file mode 100644
index 0000000000..9ebb52431b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go
@@ -0,0 +1,3 @@
+package modsdir
+
+const ManifestSnapshotFilename = "modules.json"
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh
new file mode 100644
index 0000000000..de1d693ca4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+# We do not run protoc under go:generate because we want to ensure that all
+# dependencies of go:generate are "go get"-able for general dev environment
+# usability. To compile all protobuf files in this repository, run
+# "make protobuf" at the top-level.
+
+set -eu
+
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
+DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+
+cd "$DIR"
+
+protoc -I ./ tfplugin5.proto --go_out=plugins=grpc:./
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go
new file mode 100644
index 0000000000..87a6bec75c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go
@@ -0,0 +1,3455 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: tfplugin5.proto
+
+package tfplugin5
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Diagnostic_Severity int32 + +const ( + Diagnostic_INVALID Diagnostic_Severity = 0 + Diagnostic_ERROR Diagnostic_Severity = 1 + Diagnostic_WARNING Diagnostic_Severity = 2 +) + +var Diagnostic_Severity_name = map[int32]string{ + 0: "INVALID", + 1: "ERROR", + 2: "WARNING", +} +var Diagnostic_Severity_value = map[string]int32{ + "INVALID": 0, + "ERROR": 1, + "WARNING": 2, +} + +func (x Diagnostic_Severity) String() string { + return proto.EnumName(Diagnostic_Severity_name, int32(x)) +} +func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{1, 0} +} + +type Schema_NestedBlock_NestingMode int32 + +const ( + Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 + Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 + Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 + Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 + Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 + Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 +) + +var Schema_NestedBlock_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + 5: "GROUP", +} +var Schema_NestedBlock_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + "GROUP": 5, +} + +func (x Schema_NestedBlock_NestingMode) String() string { + return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x)) +} +func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 2, 0} +} + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. 
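+//
+// For example, the boolean value true could be carried either as
+// Json: []byte(`true`) or as the equivalent one-byte msgpack encoding
+// in Msgpack; a consumer checks which field is non-empty and decodes
+// accordingly.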
+type DynamicValue struct { + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DynamicValue) Reset() { *m = DynamicValue{} } +func (m *DynamicValue) String() string { return proto.CompactTextString(m) } +func (*DynamicValue) ProtoMessage() {} +func (*DynamicValue) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{0} +} +func (m *DynamicValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DynamicValue.Unmarshal(m, b) +} +func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) +} +func (dst *DynamicValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DynamicValue.Merge(dst, src) +} +func (m *DynamicValue) XXX_Size() int { + return xxx_messageInfo_DynamicValue.Size(m) +} +func (m *DynamicValue) XXX_DiscardUnknown() { + xxx_messageInfo_DynamicValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DynamicValue proto.InternalMessageInfo + +func (m *DynamicValue) GetMsgpack() []byte { + if m != nil { + return m.Msgpack + } + return nil +} + +func (m *DynamicValue) GetJson() []byte { + if m != nil { + return m.Json + } + return nil +} + +type Diagnostic struct { + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` + Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Diagnostic) Reset() { *m = Diagnostic{} } +func (m *Diagnostic) String() string { return proto.CompactTextString(m) } +func (*Diagnostic) ProtoMessage() {} +func (*Diagnostic) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{1} +} +func (m *Diagnostic) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Diagnostic.Unmarshal(m, b) +} +func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic) +} +func (dst *Diagnostic) XXX_Merge(src proto.Message) { + xxx_messageInfo_Diagnostic.Merge(dst, src) +} +func (m *Diagnostic) XXX_Size() int { + return xxx_messageInfo_Diagnostic.Size(m) +} +func (m *Diagnostic) XXX_DiscardUnknown() { + xxx_messageInfo_Diagnostic.DiscardUnknown(m) +} + +var xxx_messageInfo_Diagnostic proto.InternalMessageInfo + +func (m *Diagnostic) GetSeverity() Diagnostic_Severity { + if m != nil { + return m.Severity + } + return Diagnostic_INVALID +} + +func (m *Diagnostic) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Diagnostic) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func (m *Diagnostic) GetAttribute() *AttributePath { + if m != nil { + return m.Attribute + } + return nil +} + +type AttributePath struct { + Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + 
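+// As an illustration, a path equivalent to instances[0].name would be
+// carried as three steps:
+//
+//     &AttributePath{Steps: []*AttributePath_Step{
+//         {Selector: &AttributePath_Step_AttributeName{AttributeName: "instances"}},
+//         {Selector: &AttributePath_Step_ElementKeyInt{ElementKeyInt: 0}},
+//         {Selector: &AttributePath_Step_AttributeName{AttributeName: "name"}},
+//     }}
+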
+func (m *AttributePath) Reset() { *m = AttributePath{} } +func (m *AttributePath) String() string { return proto.CompactTextString(m) } +func (*AttributePath) ProtoMessage() {} +func (*AttributePath) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{2} +} +func (m *AttributePath) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributePath.Unmarshal(m, b) +} +func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic) +} +func (dst *AttributePath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath.Merge(dst, src) +} +func (m *AttributePath) XXX_Size() int { + return xxx_messageInfo_AttributePath.Size(m) +} +func (m *AttributePath) XXX_DiscardUnknown() { + xxx_messageInfo_AttributePath.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributePath proto.InternalMessageInfo + +func (m *AttributePath) GetSteps() []*AttributePath_Step { + if m != nil { + return m.Steps + } + return nil +} + +type AttributePath_Step struct { + // Types that are valid to be assigned to Selector: + // *AttributePath_Step_AttributeName + // *AttributePath_Step_ElementKeyString + // *AttributePath_Step_ElementKeyInt + Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} } +func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) } +func (*AttributePath_Step) ProtoMessage() {} +func (*AttributePath_Step) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{2, 0} +} +func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b) +} +func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic) +} +func (dst *AttributePath_Step) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath_Step.Merge(dst, src) +} +func (m *AttributePath_Step) XXX_Size() int { + return xxx_messageInfo_AttributePath_Step.Size(m) +} +func (m *AttributePath_Step) XXX_DiscardUnknown() { + xxx_messageInfo_AttributePath_Step.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributePath_Step proto.InternalMessageInfo + +type isAttributePath_Step_Selector interface { + isAttributePath_Step_Selector() +} + +type AttributePath_Step_AttributeName struct { + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyString struct { + ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyInt struct { + ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` +} + +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (m *AttributePath_Step) GetAttributeName() string { + if x, ok := m.GetSelector().(*AttributePath_Step_AttributeName); ok { + return x.AttributeName + } 
+ return "" +} + +func (m *AttributePath_Step) GetElementKeyString() string { + if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyString); ok { + return x.ElementKeyString + } + return "" +} + +func (m *AttributePath_Step) GetElementKeyInt() int64 { + if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { + return x.ElementKeyInt + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*AttributePath_Step) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AttributePath_Step_OneofMarshaler, _AttributePath_Step_OneofUnmarshaler, _AttributePath_Step_OneofSizer, []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } +} + +func _AttributePath_Step_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AttributePath_Step) + // selector + switch x := m.Selector.(type) { + case *AttributePath_Step_AttributeName: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AttributeName) + case *AttributePath_Step_ElementKeyString: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ElementKeyString) + case *AttributePath_Step_ElementKeyInt: + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.ElementKeyInt)) + case nil: + default: + return fmt.Errorf("AttributePath_Step.Selector has unexpected type %T", x) + } + return nil +} + +func _AttributePath_Step_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AttributePath_Step) + switch tag { + case 1: // selector.attribute_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Selector = &AttributePath_Step_AttributeName{x} + return true, err + case 2: // selector.element_key_string + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Selector = &AttributePath_Step_ElementKeyString{x} + return true, err + case 3: // selector.element_key_int + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Selector = &AttributePath_Step_ElementKeyInt{int64(x)} + return true, err + default: + return false, nil + } +} + +func _AttributePath_Step_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AttributePath_Step) + // selector + switch x := m.Selector.(type) { + case *AttributePath_Step_AttributeName: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.AttributeName))) + n += len(x.AttributeName) + case *AttributePath_Step_ElementKeyString: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ElementKeyString))) + n += len(x.ElementKeyString) + case *AttributePath_Step_ElementKeyInt: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.ElementKeyInt)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Stop struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop) Reset() { *m = Stop{} } +func (m *Stop) String() string { return proto.CompactTextString(m) } +func (*Stop) ProtoMessage() {} +func (*Stop) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3} +} +func (m *Stop) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop.Unmarshal(m, b) +} +func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop.Marshal(b, m, deterministic) +} +func (dst *Stop) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop.Merge(dst, src) +} +func (m *Stop) XXX_Size() int { + return xxx_messageInfo_Stop.Size(m) +} +func (m *Stop) XXX_DiscardUnknown() { + xxx_messageInfo_Stop.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop proto.InternalMessageInfo + +type Stop_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop_Request) Reset() { *m = Stop_Request{} } +func (m *Stop_Request) String() string { return proto.CompactTextString(m) } +func (*Stop_Request) ProtoMessage() {} +func (*Stop_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3, 0} +} +func (m *Stop_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop_Request.Unmarshal(m, b) +} +func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic) +} +func (dst *Stop_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Request.Merge(dst, src) +} +func (m *Stop_Request) XXX_Size() int { + return xxx_messageInfo_Stop_Request.Size(m) +} +func (m *Stop_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Stop_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop_Request proto.InternalMessageInfo + +type Stop_Response struct { + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop_Response) Reset() { *m = Stop_Response{} } +func (m *Stop_Response) String() string { return proto.CompactTextString(m) } +func (*Stop_Response) ProtoMessage() {} +func (*Stop_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3, 1} +} +func (m *Stop_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop_Response.Unmarshal(m, b) +} +func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic) +} +func (dst *Stop_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Response.Merge(dst, src) +} +func (m *Stop_Response) XXX_Size() int { + return xxx_messageInfo_Stop_Response.Size(m) +} +func (m *Stop_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Stop_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop_Response proto.InternalMessageInfo + +func (m *Stop_Response) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. 
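+//
+// For example, the same piece of state might arrive as
+// Json: []byte(`{"id":"i-abc123"}`) in the current format, or as
+// Flatmap: map[string]string{"id": "i-abc123"} in the legacy format
+// (values shown are illustrative).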
+type RawState struct { + Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` + Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawState) Reset() { *m = RawState{} } +func (m *RawState) String() string { return proto.CompactTextString(m) } +func (*RawState) ProtoMessage() {} +func (*RawState) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{4} +} +func (m *RawState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RawState.Unmarshal(m, b) +} +func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RawState.Marshal(b, m, deterministic) +} +func (dst *RawState) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawState.Merge(dst, src) +} +func (m *RawState) XXX_Size() int { + return xxx_messageInfo_RawState.Size(m) +} +func (m *RawState) XXX_DiscardUnknown() { + xxx_messageInfo_RawState.DiscardUnknown(m) +} + +var xxx_messageInfo_RawState proto.InternalMessageInfo + +func (m *RawState) GetJson() []byte { + if m != nil { + return m.Json + } + return nil +} + +func (m *RawState) GetFlatmap() map[string]string { + if m != nil { + return m.Flatmap + } + return nil +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +type Schema struct { + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Block is the top level configuration block for this schema. 
+ Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5} +} +func (m *Schema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema.Unmarshal(m, b) +} +func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema.Marshal(b, m, deterministic) +} +func (dst *Schema) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema.Merge(dst, src) +} +func (m *Schema) XXX_Size() int { + return xxx_messageInfo_Schema.Size(m) +} +func (m *Schema) XXX_DiscardUnknown() { + xxx_messageInfo_Schema.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema proto.InternalMessageInfo + +func (m *Schema) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Schema) GetBlock() *Schema_Block { + if m != nil { + return m.Block + } + return nil +} + +type Schema_Block struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_Block) Reset() { *m = Schema_Block{} } +func (m *Schema_Block) String() string { return proto.CompactTextString(m) } +func (*Schema_Block) ProtoMessage() {} +func (*Schema_Block) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 0} +} +func (m *Schema_Block) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_Block.Unmarshal(m, b) +} +func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic) +} +func (dst *Schema_Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Block.Merge(dst, src) +} +func (m *Schema_Block) XXX_Size() int { + return xxx_messageInfo_Schema_Block.Size(m) +} +func (m *Schema_Block) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_Block proto.InternalMessageInfo + +func (m *Schema_Block) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Schema_Block) GetAttributes() []*Schema_Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { + if m != nil { + return m.BlockTypes + } + return nil +} + +type Schema_Attribute struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` + Sensitive bool 
`protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_Attribute) Reset() { *m = Schema_Attribute{} } +func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) } +func (*Schema_Attribute) ProtoMessage() {} +func (*Schema_Attribute) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 1} +} +func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b) +} +func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic) +} +func (dst *Schema_Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Attribute.Merge(dst, src) +} +func (m *Schema_Attribute) XXX_Size() int { + return xxx_messageInfo_Schema_Attribute.Size(m) +} +func (m *Schema_Attribute) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_Attribute.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_Attribute proto.InternalMessageInfo + +func (m *Schema_Attribute) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Schema_Attribute) GetType() []byte { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema_Attribute) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema_Attribute) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *Schema_Attribute) GetOptional() bool { + if m != nil { + return m.Optional + } + return false +} + +func (m *Schema_Attribute) GetComputed() bool { + if m != nil { + return m.Computed + } + return false +} + +func (m *Schema_Attribute) GetSensitive() bool { + if m != nil { + return m.Sensitive + } + return false +} + +type Schema_NestedBlock struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_NestedBlock) Reset() { *m = Schema_NestedBlock{} } +func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) } +func (*Schema_NestedBlock) ProtoMessage() {} +func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 2} +} +func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b) +} +func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic) +} +func (dst *Schema_NestedBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_NestedBlock.Merge(dst, src) +} +func (m *Schema_NestedBlock) XXX_Size() int { + return xxx_messageInfo_Schema_NestedBlock.Size(m) +} +func (m *Schema_NestedBlock) XXX_DiscardUnknown() { + 
xxx_messageInfo_Schema_NestedBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_NestedBlock proto.InternalMessageInfo + +func (m *Schema_NestedBlock) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *Schema_NestedBlock) GetBlock() *Schema_Block { + if m != nil { + return m.Block + } + return nil +} + +func (m *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode { + if m != nil { + return m.Nesting + } + return Schema_NestedBlock_INVALID +} + +func (m *Schema_NestedBlock) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema_NestedBlock) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +type GetProviderSchema struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema) Reset() { *m = GetProviderSchema{} } +func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema) ProtoMessage() {} +func (*GetProviderSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6} +} +func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b) +} +func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic) +} +func (dst *GetProviderSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema.Merge(dst, src) +} +func (m *GetProviderSchema) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema.Size(m) +} +func (m *GetProviderSchema) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema proto.InternalMessageInfo + +type GetProviderSchema_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema_Request) Reset() { *m = GetProviderSchema_Request{} } +func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema_Request) ProtoMessage() {} +func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6, 0} +} +func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b) +} +func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic) +} +func (dst *GetProviderSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Request.Merge(dst, src) +} +func (m *GetProviderSchema_Request) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema_Request.Size(m) +} +func (m *GetProviderSchema_Request) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema_Request proto.InternalMessageInfo + +type GetProviderSchema_Response struct { + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas 
map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema_Response) Reset() { *m = GetProviderSchema_Response{} } +func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema_Response) ProtoMessage() {} +func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6, 1} +} +func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b) +} +func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic) +} +func (dst *GetProviderSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Response.Merge(dst, src) +} +func (m *GetProviderSchema_Response) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema_Response.Size(m) +} +func (m *GetProviderSchema_Response) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema_Response proto.InternalMessageInfo + +func (m *GetProviderSchema_Response) GetProvider() *Schema { + if m != nil { + return m.Provider + } + return nil +} + +func (m *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema { + if m != nil { + return m.ResourceSchemas + } + return nil +} + +func (m *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema { + if m != nil { + return m.DataSourceSchemas + } + return nil +} + +func (m *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type PrepareProviderConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig) Reset() { *m = PrepareProviderConfig{} } +func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig) ProtoMessage() {} +func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7} +} +func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b) +} +func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic) +} +func (dst *PrepareProviderConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig.Merge(dst, src) +} +func (m *PrepareProviderConfig) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig.Size(m) +} +func (m *PrepareProviderConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig proto.InternalMessageInfo + +type PrepareProviderConfig_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized 
[]byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig_Request) Reset() { *m = PrepareProviderConfig_Request{} } +func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig_Request) ProtoMessage() {} +func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7, 0} +} +func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b) +} +func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic) +} +func (dst *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Request.Merge(dst, src) +} +func (m *PrepareProviderConfig_Request) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig_Request.Size(m) +} +func (m *PrepareProviderConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig_Request proto.InternalMessageInfo + +func (m *PrepareProviderConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type PrepareProviderConfig_Response struct { + PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig_Response) Reset() { *m = PrepareProviderConfig_Response{} } +func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig_Response) ProtoMessage() {} +func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7, 1} +} +func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b) +} +func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic) +} +func (dst *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Response.Merge(dst, src) +} +func (m *PrepareProviderConfig_Response) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig_Response.Size(m) +} +func (m *PrepareProviderConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig_Response proto.InternalMessageInfo + +func (m *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { + if m != nil { + return m.PreparedConfig + } + return nil +} + +func (m *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type UpgradeResourceState struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState) Reset() { *m = UpgradeResourceState{} } +func (m *UpgradeResourceState) String() string { return 
proto.CompactTextString(m) }
+func (*UpgradeResourceState) ProtoMessage() {}
+func (*UpgradeResourceState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8}
+}
+func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b)
+}
+func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic)
+}
+func (dst *UpgradeResourceState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpgradeResourceState.Merge(dst, src)
+}
+func (m *UpgradeResourceState) XXX_Size() int {
+	return xxx_messageInfo_UpgradeResourceState.Size(m)
+}
+func (m *UpgradeResourceState) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpgradeResourceState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpgradeResourceState proto.InternalMessageInfo
+
+type UpgradeResourceState_Request struct {
+	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
+	// version is the schema_version number recorded in the state file
+	Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+	// raw_state is the raw state as stored for the resource. Core does
+	// not have access to the schema of prior_version, so it's the
+	// provider's responsibility to interpret this value using the
+	// appropriate older schema. The raw_state will be the JSON-encoded
+	// state, or a legacy flat-mapped format.
+	RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpgradeResourceState_Request) Reset() { *m = UpgradeResourceState_Request{} }
+func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) }
+func (*UpgradeResourceState_Request) ProtoMessage() {}
+func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8, 0}
+}
+func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b)
+}
+func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic)
+}
+func (dst *UpgradeResourceState_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpgradeResourceState_Request.Merge(dst, src)
+}
+func (m *UpgradeResourceState_Request) XXX_Size() int {
+	return xxx_messageInfo_UpgradeResourceState_Request.Size(m)
+}
+func (m *UpgradeResourceState_Request) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpgradeResourceState_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpgradeResourceState_Request proto.InternalMessageInfo
+
+func (m *UpgradeResourceState_Request) GetTypeName() string {
+	if m != nil {
+		return m.TypeName
+	}
+	return ""
+}
+
+func (m *UpgradeResourceState_Request) GetVersion() int64 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *UpgradeResourceState_Request) GetRawState() *RawState {
+	if m != nil {
+		return m.RawState
+	}
+	return nil
+}
+
+type UpgradeResourceState_Response struct {
+	// new_state is a msgpack-encoded data structure that, when interpreted with
+	// the _current_ schema for this resource type, is functionally equivalent to
+	// that which was given in prior_state_raw.
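+	//
+	// For example, a provider might decode RawState.Json (or walk the legacy
+	// RawState.Flatmap) using its older schema, apply its upgrade logic, and
+	// return the result msgpack-encoded in this field; the exact
+	// transformation is provider-specific and this sketch is illustrative
+	// only, not part of the protocol definition.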
+ UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState_Response) Reset() { *m = UpgradeResourceState_Response{} } +func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState_Response) ProtoMessage() {} +func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8, 1} +} +func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b) +} +func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic) +} +func (dst *UpgradeResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Response.Merge(dst, src) +} +func (m *UpgradeResourceState_Response) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState_Response.Size(m) +} +func (m *UpgradeResourceState_Response) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState_Response proto.InternalMessageInfo + +func (m *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { + if m != nil { + return m.UpgradedState + } + return nil +} + +func (m *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateResourceTypeConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig) Reset() { *m = ValidateResourceTypeConfig{} } +func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig) ProtoMessage() {} +func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9} +} +func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic) +} +func (dst *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig.Merge(dst, src) +} +func (m *ValidateResourceTypeConfig) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig.Size(m) +} +func (m *ValidateResourceTypeConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig proto.InternalMessageInfo + +type ValidateResourceTypeConfig_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" 
json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig_Request) Reset() { *m = ValidateResourceTypeConfig_Request{} } +func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} +func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9, 0} +} +func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic) +} +func (dst *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(dst, src) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m) +} +func (m *ValidateResourceTypeConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig_Request proto.InternalMessageInfo + +func (m *ValidateResourceTypeConfig_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateResourceTypeConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig_Response) Reset() { *m = ValidateResourceTypeConfig_Response{} } +func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} +func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9, 1} +} +func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic) +} +func (dst *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(dst, src) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m) +} +func (m *ValidateResourceTypeConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig_Response proto.InternalMessageInfo + +func (m *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateDataSourceConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*ValidateDataSourceConfig) Reset() { *m = ValidateDataSourceConfig{} } +func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig) ProtoMessage() {} +func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10} +} +func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic) +} +func (dst *ValidateDataSourceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig.Merge(dst, src) +} +func (m *ValidateDataSourceConfig) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig.Size(m) +} +func (m *ValidateDataSourceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig proto.InternalMessageInfo + +type ValidateDataSourceConfig_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig_Request) Reset() { *m = ValidateDataSourceConfig_Request{} } +func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig_Request) ProtoMessage() {} +func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10, 0} +} +func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic) +} +func (dst *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(dst, src) +} +func (m *ValidateDataSourceConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m) +} +func (m *ValidateDataSourceConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig_Request proto.InternalMessageInfo + +func (m *ValidateDataSourceConfig_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateDataSourceConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig_Response) Reset() { *m = ValidateDataSourceConfig_Response{} } +func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig_Response) ProtoMessage() {} +func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10, 1} +} +func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic) +} +func (dst *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(dst, src) +} +func (m *ValidateDataSourceConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m) +} +func (m *ValidateDataSourceConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig_Response proto.InternalMessageInfo + +func (m *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type Configure struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure) Reset() { *m = Configure{} } +func (m *Configure) String() string { return proto.CompactTextString(m) } +func (*Configure) ProtoMessage() {} +func (*Configure) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11} +} +func (m *Configure) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure.Unmarshal(m, b) +} +func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure.Marshal(b, m, deterministic) +} +func (dst *Configure) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure.Merge(dst, src) +} +func (m *Configure) XXX_Size() int { + return xxx_messageInfo_Configure.Size(m) +} +func (m *Configure) XXX_DiscardUnknown() { + xxx_messageInfo_Configure.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure proto.InternalMessageInfo + +type Configure_Request struct { + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure_Request) Reset() { *m = Configure_Request{} } +func (m *Configure_Request) String() string { return proto.CompactTextString(m) } +func (*Configure_Request) ProtoMessage() {} +func (*Configure_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11, 0} +} +func (m *Configure_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure_Request.Unmarshal(m, b) +} +func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic) +} +func (dst *Configure_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Request.Merge(dst, src) +} +func (m *Configure_Request) XXX_Size() int { + return xxx_messageInfo_Configure_Request.Size(m) +} +func (m *Configure_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Configure_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure_Request proto.InternalMessageInfo + +func (m *Configure_Request) GetTerraformVersion() string { + if m != nil { + return 
m.TerraformVersion + } + return "" +} + +func (m *Configure_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type Configure_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure_Response) Reset() { *m = Configure_Response{} } +func (m *Configure_Response) String() string { return proto.CompactTextString(m) } +func (*Configure_Response) ProtoMessage() {} +func (*Configure_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11, 1} +} +func (m *Configure_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure_Response.Unmarshal(m, b) +} +func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic) +} +func (dst *Configure_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Response.Merge(dst, src) +} +func (m *Configure_Response) XXX_Size() int { + return xxx_messageInfo_Configure_Response.Size(m) +} +func (m *Configure_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Configure_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure_Response proto.InternalMessageInfo + +func (m *Configure_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ReadResource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource) Reset() { *m = ReadResource{} } +func (m *ReadResource) String() string { return proto.CompactTextString(m) } +func (*ReadResource) ProtoMessage() {} +func (*ReadResource) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12} +} +func (m *ReadResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource.Unmarshal(m, b) +} +func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic) +} +func (dst *ReadResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource.Merge(dst, src) +} +func (m *ReadResource) XXX_Size() int { + return xxx_messageInfo_ReadResource.Size(m) +} +func (m *ReadResource) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource proto.InternalMessageInfo + +type ReadResource_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource_Request) Reset() { *m = ReadResource_Request{} } +func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) } +func (*ReadResource_Request) ProtoMessage() {} +func (*ReadResource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12, 0} +} +func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b) +} +func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + return xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic) +} +func (dst *ReadResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Request.Merge(dst, src) +} +func (m *ReadResource_Request) XXX_Size() int { + return xxx_messageInfo_ReadResource_Request.Size(m) +} +func (m *ReadResource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource_Request proto.InternalMessageInfo + +func (m *ReadResource_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ReadResource_Request) GetCurrentState() *DynamicValue { + if m != nil { + return m.CurrentState + } + return nil +} + +type ReadResource_Response struct { + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource_Response) Reset() { *m = ReadResource_Response{} } +func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) } +func (*ReadResource_Response) ProtoMessage() {} +func (*ReadResource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12, 1} +} +func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b) +} +func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic) +} +func (dst *ReadResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Response.Merge(dst, src) +} +func (m *ReadResource_Response) XXX_Size() int { + return xxx_messageInfo_ReadResource_Response.Size(m) +} +func (m *ReadResource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource_Response proto.InternalMessageInfo + +func (m *ReadResource_Response) GetNewState() *DynamicValue { + if m != nil { + return m.NewState + } + return nil +} + +func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type PlanResourceChange struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange) Reset() { *m = PlanResourceChange{} } +func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange) ProtoMessage() {} +func (*PlanResourceChange) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13} +} +func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b) +} +func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic) +} +func (dst *PlanResourceChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange.Merge(dst, src) +} +func (m *PlanResourceChange) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange.Size(m) +} +func (m *PlanResourceChange) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange.DiscardUnknown(m) +} + +var 
xxx_messageInfo_PlanResourceChange proto.InternalMessageInfo + +type PlanResourceChange_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange_Request) Reset() { *m = PlanResourceChange_Request{} } +func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange_Request) ProtoMessage() {} +func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13, 0} +} +func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b) +} +func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic) +} +func (dst *PlanResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Request.Merge(dst, src) +} +func (m *PlanResourceChange_Request) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange_Request.Size(m) +} +func (m *PlanResourceChange_Request) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange_Request proto.InternalMessageInfo + +func (m *PlanResourceChange_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *PlanResourceChange_Request) GetPriorState() *DynamicValue { + if m != nil { + return m.PriorState + } + return nil +} + +func (m *PlanResourceChange_Request) GetProposedNewState() *DynamicValue { + if m != nil { + return m.ProposedNewState + } + return nil +} + +func (m *PlanResourceChange_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *PlanResourceChange_Request) GetPriorPrivate() []byte { + if m != nil { + return m.PriorPrivate + } + return nil +} + +type PlanResourceChange_Response struct { + PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. 
+	// The change in behavior implied by this flag makes sense only for the
+	// specific details of the legacy SDK type system, and is not a general
+	// mechanism to avoid proper type handling in providers.
+	//
+	// ==== DO NOT USE THIS ====
+	// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
+	// ==== DO NOT USE THIS ====
+	LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PlanResourceChange_Response) Reset() { *m = PlanResourceChange_Response{} }
+func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) }
+func (*PlanResourceChange_Response) ProtoMessage() {}
+func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13, 1}
+}
+func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b)
+}
+func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic)
+}
+func (dst *PlanResourceChange_Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PlanResourceChange_Response.Merge(dst, src)
+}
+func (m *PlanResourceChange_Response) XXX_Size() int {
+	return xxx_messageInfo_PlanResourceChange_Response.Size(m)
+}
+func (m *PlanResourceChange_Response) XXX_DiscardUnknown() {
+	xxx_messageInfo_PlanResourceChange_Response.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PlanResourceChange_Response proto.InternalMessageInfo
+
+func (m *PlanResourceChange_Response) GetPlannedState() *DynamicValue {
+	if m != nil {
+		return m.PlannedState
+	}
+	return nil
+}
+
+func (m *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath {
+	if m != nil {
+		return m.RequiresReplace
+	}
+	return nil
+}
+
+func (m *PlanResourceChange_Response) GetPlannedPrivate() []byte {
+	if m != nil {
+		return m.PlannedPrivate
+	}
+	return nil
+}
+
+func (m *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic {
+	if m != nil {
+		return m.Diagnostics
+	}
+	return nil
+}
+
+func (m *PlanResourceChange_Response) GetLegacyTypeSystem() bool {
+	if m != nil {
+		return m.LegacyTypeSystem
+	}
+	return false
+}
+
+type ApplyResourceChange struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ApplyResourceChange) Reset() { *m = ApplyResourceChange{} }
+func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) }
+func (*ApplyResourceChange) ProtoMessage() {}
+func (*ApplyResourceChange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14}
+}
+func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b)
+}
+func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic)
+}
+func (dst *ApplyResourceChange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ApplyResourceChange.Merge(dst, src)
+}
+func (m *ApplyResourceChange) XXX_Size() int {
+	return xxx_messageInfo_ApplyResourceChange.Size(m)
+}
+func (m *ApplyResourceChange) XXX_DiscardUnknown() {
+	xxx_messageInfo_ApplyResourceChange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplyResourceChange proto.InternalMessageInfo
+
+type ApplyResourceChange_Request struct {
+	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
+	PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"`
+	PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"`
+	Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
+	PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ApplyResourceChange_Request) Reset() { *m = ApplyResourceChange_Request{} }
+func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) }
+func (*ApplyResourceChange_Request) ProtoMessage() {}
+func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {
+	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14, 0}
+}
+func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b)
+}
+func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic)
+}
+func (dst *ApplyResourceChange_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ApplyResourceChange_Request.Merge(dst, src)
+}
+func (m *ApplyResourceChange_Request) XXX_Size() int {
+	return xxx_messageInfo_ApplyResourceChange_Request.Size(m)
+}
+func (m *ApplyResourceChange_Request) XXX_DiscardUnknown() {
+	xxx_messageInfo_ApplyResourceChange_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplyResourceChange_Request proto.InternalMessageInfo
+
+func (m *ApplyResourceChange_Request) GetTypeName() string {
+	if m != nil {
+		return m.TypeName
+	}
+	return ""
+}
+
+func (m *ApplyResourceChange_Request) GetPriorState() *DynamicValue {
+	if m != nil {
+		return m.PriorState
+	}
+	return nil
+}
+
+func (m *ApplyResourceChange_Request) GetPlannedState() *DynamicValue {
+	if m != nil {
+		return m.PlannedState
+	}
+	return nil
+}
+
+func (m *ApplyResourceChange_Request) GetConfig() *DynamicValue {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+func (m *ApplyResourceChange_Request) GetPlannedPrivate() []byte {
+	if m != nil {
+		return m.PlannedPrivate
+	}
+	return nil
+}
+
+type ApplyResourceChange_Response struct {
+	NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
+	Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"`
+	Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
+	// This may be set only by the helper/schema "SDK" in the main Terraform
+	// repository, to request that Terraform Core >=0.12 permit additional
+	// inconsistencies that can result from the legacy SDK type system
+	// and its imprecise mapping to the >=0.12 type system.
+	// The change in behavior implied by this flag makes sense only for the
+	// specific details of the legacy SDK type system, and is not a general
+	// mechanism to avoid proper type handling in providers.
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange_Response) Reset() { *m = ApplyResourceChange_Response{} } +func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange_Response) ProtoMessage() {} +func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14, 1} +} +func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b) +} +func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic) +} +func (dst *ApplyResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Response.Merge(dst, src) +} +func (m *ApplyResourceChange_Response) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange_Response.Size(m) +} +func (m *ApplyResourceChange_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange_Response proto.InternalMessageInfo + +func (m *ApplyResourceChange_Response) GetNewState() *DynamicValue { + if m != nil { + return m.NewState + } + return nil +} + +func (m *ApplyResourceChange_Response) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +func (m *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if m != nil { + return m.LegacyTypeSystem + } + return false +} + +type ImportResourceState struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState) Reset() { *m = ImportResourceState{} } +func (m *ImportResourceState) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState) ProtoMessage() {} +func (*ImportResourceState) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15} +} +func (m *ImportResourceState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState.Unmarshal(m, b) +} +func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic) +} +func (dst *ImportResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState.Merge(dst, src) +} +func (m *ImportResourceState) XXX_Size() int { + return xxx_messageInfo_ImportResourceState.Size(m) +} +func (m *ImportResourceState) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState proto.InternalMessageInfo + +type ImportResourceState_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte 
`json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_Request) Reset() { *m = ImportResourceState_Request{} } +func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_Request) ProtoMessage() {} +func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 0} +} +func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b) +} +func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic) +} +func (dst *ImportResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Request.Merge(dst, src) +} +func (m *ImportResourceState_Request) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_Request.Size(m) +} +func (m *ImportResourceState_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_Request proto.InternalMessageInfo + +func (m *ImportResourceState_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ImportResourceState_Request) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type ImportResourceState_ImportedResource struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportResourceState_ImportedResource{} } +func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_ImportedResource) ProtoMessage() {} +func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 1} +} +func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b) +} +func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic) +} +func (dst *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_ImportedResource.Merge(dst, src) +} +func (m *ImportResourceState_ImportedResource) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m) +} +func (m *ImportResourceState_ImportedResource) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_ImportedResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_ImportedResource proto.InternalMessageInfo + +func (m *ImportResourceState_ImportedResource) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ImportResourceState_ImportedResource) GetState() *DynamicValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ImportResourceState_ImportedResource) GetPrivate() []byte { + if m != nil { + return 
m.Private + } + return nil +} + +type ImportResourceState_Response struct { + ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_Response) Reset() { *m = ImportResourceState_Response{} } +func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_Response) ProtoMessage() {} +func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 2} +} +func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b) +} +func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic) +} +func (dst *ImportResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Response.Merge(dst, src) +} +func (m *ImportResourceState_Response) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_Response.Size(m) +} +func (m *ImportResourceState_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_Response proto.InternalMessageInfo + +func (m *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { + if m != nil { + return m.ImportedResources + } + return nil +} + +func (m *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ReadDataSource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource) Reset() { *m = ReadDataSource{} } +func (m *ReadDataSource) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource) ProtoMessage() {} +func (*ReadDataSource) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16} +} +func (m *ReadDataSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource.Unmarshal(m, b) +} +func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic) +} +func (dst *ReadDataSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource.Merge(dst, src) +} +func (m *ReadDataSource) XXX_Size() int { + return xxx_messageInfo_ReadDataSource.Size(m) +} +func (m *ReadDataSource) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource proto.InternalMessageInfo + +type ReadDataSource_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource_Request) Reset() { *m = ReadDataSource_Request{} } +func (m *ReadDataSource_Request) String() string { return 
proto.CompactTextString(m) } +func (*ReadDataSource_Request) ProtoMessage() {} +func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16, 0} +} +func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b) +} +func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic) +} +func (dst *ReadDataSource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Request.Merge(dst, src) +} +func (m *ReadDataSource_Request) XXX_Size() int { + return xxx_messageInfo_ReadDataSource_Request.Size(m) +} +func (m *ReadDataSource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource_Request proto.InternalMessageInfo + +func (m *ReadDataSource_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ReadDataSource_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ReadDataSource_Response struct { + State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource_Response) Reset() { *m = ReadDataSource_Response{} } +func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource_Response) ProtoMessage() {} +func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16, 1} +} +func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b) +} +func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic) +} +func (dst *ReadDataSource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Response.Merge(dst, src) +} +func (m *ReadDataSource_Response) XXX_Size() int { + return xxx_messageInfo_ReadDataSource_Response.Size(m) +} +func (m *ReadDataSource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource_Response proto.InternalMessageInfo + +func (m *ReadDataSource_Response) GetState() *DynamicValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type GetProvisionerSchema struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema) Reset() { *m = GetProvisionerSchema{} } +func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema) ProtoMessage() {} +func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17} +} +func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b) +} +func (m *GetProvisionerSchema) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, deterministic) +} +func (dst *GetProvisionerSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema.Merge(dst, src) +} +func (m *GetProvisionerSchema) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema.Size(m) +} +func (m *GetProvisionerSchema) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema proto.InternalMessageInfo + +type GetProvisionerSchema_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema_Request) Reset() { *m = GetProvisionerSchema_Request{} } +func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema_Request) ProtoMessage() {} +func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17, 0} +} +func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b) +} +func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic) +} +func (dst *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Request.Merge(dst, src) +} +func (m *GetProvisionerSchema_Request) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema_Request.Size(m) +} +func (m *GetProvisionerSchema_Request) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema_Request proto.InternalMessageInfo + +type GetProvisionerSchema_Response struct { + Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema_Response) Reset() { *m = GetProvisionerSchema_Response{} } +func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema_Response) ProtoMessage() {} +func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17, 1} +} +func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b) +} +func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic) +} +func (dst *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Response.Merge(dst, src) +} +func (m *GetProvisionerSchema_Response) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema_Response.Size(m) +} +func (m *GetProvisionerSchema_Response) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema_Response proto.InternalMessageInfo + +func (m *GetProvisionerSchema_Response) GetProvisioner() *Schema { + if m != nil { + return m.Provisioner + } + return 
nil +} + +func (m *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateProvisionerConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig) Reset() { *m = ValidateProvisionerConfig{} } +func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig) ProtoMessage() {} +func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18} +} +func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic) +} +func (dst *ValidateProvisionerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig.Merge(dst, src) +} +func (m *ValidateProvisionerConfig) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig.Size(m) +} +func (m *ValidateProvisionerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig proto.InternalMessageInfo + +type ValidateProvisionerConfig_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig_Request) Reset() { *m = ValidateProvisionerConfig_Request{} } +func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig_Request) ProtoMessage() {} +func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18, 0} +} +func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic) +} +func (dst *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(dst, src) +} +func (m *ValidateProvisionerConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m) +} +func (m *ValidateProvisionerConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig_Request proto.InternalMessageInfo + +func (m *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateProvisionerConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig_Response) Reset() { *m = ValidateProvisionerConfig_Response{} } +func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) } +func 
(*ValidateProvisionerConfig_Response) ProtoMessage() {} +func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18, 1} +} +func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic) +} +func (dst *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(dst, src) +} +func (m *ValidateProvisionerConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m) +} +func (m *ValidateProvisionerConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig_Response proto.InternalMessageInfo + +func (m *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ProvisionResource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource) Reset() { *m = ProvisionResource{} } +func (m *ProvisionResource) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource) ProtoMessage() {} +func (*ProvisionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19} +} +func (m *ProvisionResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource.Unmarshal(m, b) +} +func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic) +} +func (dst *ProvisionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource.Merge(dst, src) +} +func (m *ProvisionResource) XXX_Size() int { + return xxx_messageInfo_ProvisionResource.Size(m) +} +func (m *ProvisionResource) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource proto.InternalMessageInfo + +type ProvisionResource_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource_Request) Reset() { *m = ProvisionResource_Request{} } +func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource_Request) ProtoMessage() {} +func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19, 0} +} +func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b) +} +func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic) +} +func (dst *ProvisionResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Request.Merge(dst, src) +} +func (m 
*ProvisionResource_Request) XXX_Size() int { + return xxx_messageInfo_ProvisionResource_Request.Size(m) +} +func (m *ProvisionResource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource_Request proto.InternalMessageInfo + +func (m *ProvisionResource_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *ProvisionResource_Request) GetConnection() *DynamicValue { + if m != nil { + return m.Connection + } + return nil +} + +type ProvisionResource_Response struct { + Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource_Response) Reset() { *m = ProvisionResource_Response{} } +func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource_Response) ProtoMessage() {} +func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19, 1} +} +func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b) +} +func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic) +} +func (dst *ProvisionResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Response.Merge(dst, src) +} +func (m *ProvisionResource_Response) XXX_Size() int { + return xxx_messageInfo_ProvisionResource_Response.Size(m) +} +func (m *ProvisionResource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource_Response proto.InternalMessageInfo + +func (m *ProvisionResource_Response) GetOutput() string { + if m != nil { + return m.Output + } + return "" +} + +func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func init() { + proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue") + proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic") + proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath") + proto.RegisterType((*AttributePath_Step)(nil), "tfplugin5.AttributePath.Step") + proto.RegisterType((*Stop)(nil), "tfplugin5.Stop") + proto.RegisterType((*Stop_Request)(nil), "tfplugin5.Stop.Request") + proto.RegisterType((*Stop_Response)(nil), "tfplugin5.Stop.Response") + proto.RegisterType((*RawState)(nil), "tfplugin5.RawState") + proto.RegisterMapType((map[string]string)(nil), "tfplugin5.RawState.FlatmapEntry") + proto.RegisterType((*Schema)(nil), "tfplugin5.Schema") + proto.RegisterType((*Schema_Block)(nil), "tfplugin5.Schema.Block") + proto.RegisterType((*Schema_Attribute)(nil), "tfplugin5.Schema.Attribute") + proto.RegisterType((*Schema_NestedBlock)(nil), "tfplugin5.Schema.NestedBlock") + proto.RegisterType((*GetProviderSchema)(nil), "tfplugin5.GetProviderSchema") + proto.RegisterType((*GetProviderSchema_Request)(nil), "tfplugin5.GetProviderSchema.Request") + proto.RegisterType((*GetProviderSchema_Response)(nil), "tfplugin5.GetProviderSchema.Response") + proto.RegisterMapType((map[string]*Schema)(nil), 
"tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry") + proto.RegisterType((*PrepareProviderConfig)(nil), "tfplugin5.PrepareProviderConfig") + proto.RegisterType((*PrepareProviderConfig_Request)(nil), "tfplugin5.PrepareProviderConfig.Request") + proto.RegisterType((*PrepareProviderConfig_Response)(nil), "tfplugin5.PrepareProviderConfig.Response") + proto.RegisterType((*UpgradeResourceState)(nil), "tfplugin5.UpgradeResourceState") + proto.RegisterType((*UpgradeResourceState_Request)(nil), "tfplugin5.UpgradeResourceState.Request") + proto.RegisterType((*UpgradeResourceState_Response)(nil), "tfplugin5.UpgradeResourceState.Response") + proto.RegisterType((*ValidateResourceTypeConfig)(nil), "tfplugin5.ValidateResourceTypeConfig") + proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "tfplugin5.ValidateResourceTypeConfig.Request") + proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "tfplugin5.ValidateResourceTypeConfig.Response") + proto.RegisterType((*ValidateDataSourceConfig)(nil), "tfplugin5.ValidateDataSourceConfig") + proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "tfplugin5.ValidateDataSourceConfig.Request") + proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "tfplugin5.ValidateDataSourceConfig.Response") + proto.RegisterType((*Configure)(nil), "tfplugin5.Configure") + proto.RegisterType((*Configure_Request)(nil), "tfplugin5.Configure.Request") + proto.RegisterType((*Configure_Response)(nil), "tfplugin5.Configure.Response") + proto.RegisterType((*ReadResource)(nil), "tfplugin5.ReadResource") + proto.RegisterType((*ReadResource_Request)(nil), "tfplugin5.ReadResource.Request") + proto.RegisterType((*ReadResource_Response)(nil), "tfplugin5.ReadResource.Response") + proto.RegisterType((*PlanResourceChange)(nil), "tfplugin5.PlanResourceChange") + proto.RegisterType((*PlanResourceChange_Request)(nil), "tfplugin5.PlanResourceChange.Request") + proto.RegisterType((*PlanResourceChange_Response)(nil), "tfplugin5.PlanResourceChange.Response") + proto.RegisterType((*ApplyResourceChange)(nil), "tfplugin5.ApplyResourceChange") + proto.RegisterType((*ApplyResourceChange_Request)(nil), "tfplugin5.ApplyResourceChange.Request") + proto.RegisterType((*ApplyResourceChange_Response)(nil), "tfplugin5.ApplyResourceChange.Response") + proto.RegisterType((*ImportResourceState)(nil), "tfplugin5.ImportResourceState") + proto.RegisterType((*ImportResourceState_Request)(nil), "tfplugin5.ImportResourceState.Request") + proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "tfplugin5.ImportResourceState.ImportedResource") + proto.RegisterType((*ImportResourceState_Response)(nil), "tfplugin5.ImportResourceState.Response") + proto.RegisterType((*ReadDataSource)(nil), "tfplugin5.ReadDataSource") + proto.RegisterType((*ReadDataSource_Request)(nil), "tfplugin5.ReadDataSource.Request") + proto.RegisterType((*ReadDataSource_Response)(nil), "tfplugin5.ReadDataSource.Response") + proto.RegisterType((*GetProvisionerSchema)(nil), "tfplugin5.GetProvisionerSchema") + proto.RegisterType((*GetProvisionerSchema_Request)(nil), "tfplugin5.GetProvisionerSchema.Request") + proto.RegisterType((*GetProvisionerSchema_Response)(nil), "tfplugin5.GetProvisionerSchema.Response") + proto.RegisterType((*ValidateProvisionerConfig)(nil), "tfplugin5.ValidateProvisionerConfig") + proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), 
"tfplugin5.ValidateProvisionerConfig.Request") + proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "tfplugin5.ValidateProvisionerConfig.Response") + proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource") + proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request") + proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response") + proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value) + proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ProviderClient is the client API for Provider service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProviderClient interface { + // ////// Information about what a provider supports/expects + GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) + PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) + PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) + ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) + ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) + ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + // ////// Graceful Shutdown + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type providerClient struct { + cc *grpc.ClientConn +} + +func NewProviderClient(cc *grpc.ClientConn) ProviderClient { + return &providerClient{cc} +} + +func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { + out := new(GetProviderSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { + out := new(PrepareProviderConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { + out := new(ValidateResourceTypeConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { + out := new(ValidateDataSourceConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { + out := new(UpgradeResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { + out := new(Configure_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { + out := new(ReadResource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { + out := new(PlanResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { + out := new(ApplyResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { + out := new(ImportResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { + out := new(ReadDataSource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProviderServer is the server API for Provider service. +type ProviderServer interface { + // ////// Information about what a provider supports/expects + GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) + PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + Configure(context.Context, *Configure_Request) (*Configure_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) + PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) + ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) + ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) + ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + // ////// Graceful Shutdown + Stop(context.Context, *Stop_Request) (*Stop_Response, error) +} + +func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { + s.RegisterService(&_Provider_serviceDesc, srv) +} + +func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProviderSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareProviderConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PrepareProviderConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PrepareProviderConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(ValidateResourceTypeConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateDataSourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).UpgradeResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/UpgradeResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Configure_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Configure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadResource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlanResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + 
return srv.(ProviderServer).PlanResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PlanResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ApplyResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ApplyResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ImportResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ImportResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDataSource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadDataSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadDataSource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provider_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provider", + HandlerType: (*ProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provider_GetSchema_Handler, + }, + { + MethodName: "PrepareProviderConfig", + Handler: _Provider_PrepareProviderConfig_Handler, + }, + { + MethodName: "ValidateResourceTypeConfig", + Handler: _Provider_ValidateResourceTypeConfig_Handler, + }, + { + MethodName: "ValidateDataSourceConfig", + Handler: 
_Provider_ValidateDataSourceConfig_Handler, + }, + { + MethodName: "UpgradeResourceState", + Handler: _Provider_UpgradeResourceState_Handler, + }, + { + MethodName: "Configure", + Handler: _Provider_Configure_Handler, + }, + { + MethodName: "ReadResource", + Handler: _Provider_ReadResource_Handler, + }, + { + MethodName: "PlanResourceChange", + Handler: _Provider_PlanResourceChange_Handler, + }, + { + MethodName: "ApplyResourceChange", + Handler: _Provider_ApplyResourceChange_Handler, + }, + { + MethodName: "ImportResourceState", + Handler: _Provider_ImportResourceState_Handler, + }, + { + MethodName: "ReadDataSource", + Handler: _Provider_ReadDataSource_Handler, + }, + { + MethodName: "Stop", + Handler: _Provider_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tfplugin5.proto", +} + +// ProvisionerClient is the client API for Provisioner service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProvisionerClient interface { + GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type provisionerClient struct { + cc *grpc.ClientConn +} + +func NewProvisionerClient(cc *grpc.ClientConn) ProvisionerClient { + return &provisionerClient{cc} +} + +func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { + out := new(GetProvisionerSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) { + out := new(ValidateProvisionerConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { + stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) 
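+ // ProvisionResource is the one server-streaming RPC in this protocol: the
+ // client sends a single request and then reads responses until the stream
+ // ends. The code below sends the request, half-closes the send side with
+ // CloseSend, and returns a typed wrapper whose Recv method a caller would
+ // drain in a loop, along these lines (illustrative sketch only):
+ //
+ //     for {
+ //         resp, err := stream.Recv()
+ //         if err == io.EOF {
+ //             break // the provisioner has finished streaming output
+ //         }
+ //         if err != nil {
+ //             return err
+ //         }
+ //         fmt.Println(resp.Output)
+ //     }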
+ if err != nil { + return nil, err + } + x := &provisionerProvisionResourceClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Provisioner_ProvisionResourceClient interface { + Recv() (*ProvisionResource_Response, error) + grpc.ClientStream +} + +type provisionerProvisionResourceClient struct { + grpc.ClientStream +} + +func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) { + m := new(ProvisionResource_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProvisionerServer is the server API for Provisioner service. +type ProvisionerServer interface { + GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error + Stop(context.Context, *Stop_Request) (*Stop_Response, error) +} + +func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) { + s.RegisterService(&_Provisioner_serviceDesc, srv) +} + +func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProvisionerSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateProvisionerConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ProvisionResource_Request) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream}) +} + +type Provisioner_ProvisionResourceServer interface { + Send(*ProvisionResource_Response) error + grpc.ServerStream +} + +type provisionerProvisionResourceServer struct { + grpc.ServerStream +} + +func (x *provisionerProvisionResourceServer) Send(m 
*ProvisionResource_Response) error { + return x.ServerStream.SendMsg(m) +} + +func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provisioner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provisioner", + HandlerType: (*ProvisionerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provisioner_GetSchema_Handler, + }, + { + MethodName: "ValidateProvisionerConfig", + Handler: _Provisioner_ValidateProvisionerConfig_Handler, + }, + { + MethodName: "Stop", + Handler: _Provisioner_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ProvisionResource", + Handler: _Provisioner_ProvisionResource_Handler, + ServerStreams: true, + }, + }, + Metadata: "tfplugin5.proto", +} + +func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_tfplugin5_56820f4fb67360c5) } + +var fileDescriptor_tfplugin5_56820f4fb67360c5 = []byte{ + // 1876 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x23, 0x49, + 0x15, 0x9f, 0x76, 0xdb, 0x89, 0xfd, 0x9c, 0x0f, 0xa7, 0x66, 0x76, 0x30, 0xbd, 0xbb, 0x10, 0xcc, + 0x47, 0xb2, 0xda, 0x1d, 0xcf, 0x2a, 0x03, 0xbb, 0x4b, 0x18, 0xad, 0xc8, 0x66, 0x42, 0x26, 0x62, + 0x26, 0x1b, 0xca, 0xf3, 0x81, 0x84, 0xb4, 0x56, 0x8d, 0xbb, 0xe2, 0x69, 0xc6, 0xee, 0xee, 0xad, + 0x2e, 0x67, 0x62, 0x71, 0x44, 0x70, 0xe6, 0xc2, 0x87, 0xc4, 0xc7, 0x85, 0x03, 0xff, 0x00, 0x07, + 0xe0, 0xc6, 0x89, 0x7f, 0x80, 0x1b, 0x70, 0x42, 0x70, 0x43, 0x1c, 0xe1, 0x82, 0x84, 0xea, 0xab, + 0xbb, 0x6c, 0xb7, 0x93, 0x9e, 0x64, 0x57, 0x88, 0x5b, 0x57, 0xbd, 0x5f, 0xbd, 0xf7, 0xab, 0xf7, + 0x5e, 0xbd, 0x57, 0x65, 0xc3, 0x2a, 0x3f, 0x8e, 0x07, 0xa3, 0x7e, 0x10, 0x7e, 0xa9, 0x1d, 0xb3, + 0x88, 0x47, 0xa8, 0x96, 0x4e, 0xb4, 0x6e, 0xc3, 0xd2, 0x9d, 0x71, 0x48, 0x86, 0x41, 0xef, 0x11, + 0x19, 0x8c, 0x28, 0x6a, 0xc2, 0xe2, 0x30, 0xe9, 0xc7, 0xa4, 0xf7, 0xac, 0xe9, 0xac, 0x3b, 0x9b, + 0x4b, 0xd8, 0x0c, 0x11, 0x82, 0xf2, 0xb7, 0x93, 0x28, 0x6c, 0x96, 0xe4, 0xb4, 0xfc, 0x6e, 0xfd, + 0xd5, 0x01, 0xb8, 0x13, 0x90, 0x7e, 0x18, 0x25, 0x3c, 0xe8, 0xa1, 0x6d, 0xa8, 0x26, 0xf4, 0x84, + 0xb2, 0x80, 0x8f, 0xe5, 0xea, 0x95, 0xad, 0x4f, 0xb5, 0x33, 0xdb, 0x19, 0xb0, 0xdd, 0xd1, 0x28, + 0x9c, 0xe2, 0x85, 0xe1, 0x64, 0x34, 0x1c, 0x12, 0x36, 0x96, 0x16, 0x6a, 0xd8, 0x0c, 0xd1, 0x75, + 0x58, 0xf0, 0x29, 0x27, 0xc1, 0xa0, 0xe9, 0x4a, 0x81, 0x1e, 0xa1, 0xb7, 0xa0, 0x46, 0x38, 0x67, + 0xc1, 0x93, 0x11, 0xa7, 0xcd, 0xf2, 0xba, 0xb3, 0x59, 0xdf, 0x6a, 0x5a, 0xe6, 0x76, 0x8c, 0xec, + 0x88, 0xf0, 0xa7, 0x38, 0x83, 0xb6, 0x6e, 0x42, 0xd5, 0xd8, 0x47, 0x75, 0x58, 0x3c, 0x38, 0x7c, + 0xb4, 0x73, 0xef, 0xe0, 0x4e, 0xe3, 0x0a, 0xaa, 0x41, 0x65, 0x0f, 0xe3, 0xf7, 0x71, 0xc3, 0x11, + 0xf3, 0x8f, 0x77, 0xf0, 0xe1, 0xc1, 0xe1, 0x7e, 0xa3, 0xd4, 0xfa, 0xb3, 0x03, 0xcb, 0x13, 0xda, + 0xd0, 0x2d, 0xa8, 0x24, 0x9c, 0xc6, 0x49, 0xd3, 0x59, 0x77, 0x37, 0xeb, 0x5b, 0xaf, 0xce, 0x33, + 0xdb, 0xee, 0x70, 0x1a, 0x63, 0x85, 0xf5, 0x7e, 0xe8, 0x40, 0x59, 0x8c, 0xd1, 
0x06, 0xac, 0xa4, + 0x6c, 0xba, 0x21, 0x19, 0x52, 0xe9, 0xac, 0xda, 0xdd, 0x2b, 0x78, 0x39, 0x9d, 0x3f, 0x24, 0x43, + 0x8a, 0xda, 0x80, 0xe8, 0x80, 0x0e, 0x69, 0xc8, 0xbb, 0xcf, 0xe8, 0xb8, 0x9b, 0x70, 0x16, 0x84, + 0x7d, 0xe5, 0x9e, 0xbb, 0x57, 0x70, 0x43, 0xcb, 0xbe, 0x4e, 0xc7, 0x1d, 0x29, 0x41, 0x9b, 0xb0, + 0x6a, 0xe3, 0x83, 0x90, 0x4b, 0x97, 0xb9, 0x42, 0x73, 0x06, 0x3e, 0x08, 0xf9, 0x7b, 0x20, 0x22, + 0x35, 0xa0, 0x3d, 0x1e, 0xb1, 0xd6, 0x2d, 0x41, 0x2b, 0x8a, 0xbd, 0x1a, 0x2c, 0x62, 0xfa, 0xe1, + 0x88, 0x26, 0xdc, 0x5b, 0x87, 0x2a, 0xa6, 0x49, 0x1c, 0x85, 0x09, 0x45, 0xd7, 0xa0, 0xb2, 0xc7, + 0x58, 0xc4, 0x14, 0x49, 0xac, 0x06, 0xad, 0x1f, 0x39, 0x50, 0xc5, 0xe4, 0x79, 0x87, 0x13, 0x4e, + 0xd3, 0xd4, 0x70, 0xb2, 0xd4, 0x40, 0xdb, 0xb0, 0x78, 0x3c, 0x20, 0x7c, 0x48, 0xe2, 0x66, 0x49, + 0x3a, 0x69, 0xdd, 0x72, 0x92, 0x59, 0xd9, 0xfe, 0x9a, 0x82, 0xec, 0x85, 0x9c, 0x8d, 0xb1, 0x59, + 0xe0, 0x6d, 0xc3, 0x92, 0x2d, 0x40, 0x0d, 0x70, 0x9f, 0xd1, 0xb1, 0x26, 0x20, 0x3e, 0x05, 0xa9, + 0x13, 0x91, 0xaf, 0x3a, 0x57, 0xd4, 0x60, 0xbb, 0xf4, 0x8e, 0xd3, 0xfa, 0x7b, 0x05, 0x16, 0x3a, + 0xbd, 0xa7, 0x74, 0x48, 0x44, 0x4a, 0x9d, 0x50, 0x96, 0x04, 0x9a, 0x99, 0x8b, 0xcd, 0x10, 0xdd, + 0x80, 0xca, 0x93, 0x41, 0xd4, 0x7b, 0x26, 0x97, 0xd7, 0xb7, 0x3e, 0x61, 0x51, 0x53, 0x6b, 0xdb, + 0xef, 0x09, 0x31, 0x56, 0x28, 0xef, 0x17, 0x0e, 0x54, 0xe4, 0xc4, 0x19, 0x2a, 0xbf, 0x02, 0x90, + 0x06, 0x2f, 0xd1, 0x5b, 0x7e, 0x79, 0x56, 0x6f, 0x9a, 0x1e, 0xd8, 0x82, 0xa3, 0x77, 0xa1, 0x2e, + 0x2d, 0x75, 0xf9, 0x38, 0xa6, 0x49, 0xd3, 0x9d, 0xc9, 0x2a, 0xbd, 0xfa, 0x90, 0x26, 0x9c, 0xfa, + 0x8a, 0x1b, 0xc8, 0x15, 0x0f, 0xc4, 0x02, 0xef, 0x0f, 0x0e, 0xd4, 0x52, 0xcd, 0x22, 0x1c, 0x59, + 0x56, 0x61, 0xf9, 0x2d, 0xe6, 0x84, 0x6e, 0x73, 0x7a, 0xc5, 0x37, 0x5a, 0x87, 0xba, 0x4f, 0x93, + 0x1e, 0x0b, 0x62, 0x2e, 0x36, 0xa4, 0x4e, 0x97, 0x3d, 0x85, 0x3c, 0xa8, 0x32, 0xfa, 0xe1, 0x28, + 0x60, 0xd4, 0x97, 0x27, 0xac, 0x8a, 0xd3, 0xb1, 0x90, 0x45, 0x12, 0x45, 0x06, 0xcd, 0x8a, 0x92, + 0x99, 0xb1, 0x90, 0xf5, 0xa2, 0x61, 0x3c, 0xe2, 0xd4, 0x6f, 0x2e, 0x28, 0x99, 0x19, 0xa3, 0x57, + 0xa0, 0x96, 0xd0, 0x30, 0x09, 0x78, 0x70, 0x42, 0x9b, 0x8b, 0x52, 0x98, 0x4d, 0x78, 0xbf, 0x2a, + 0x41, 0xdd, 0xda, 0x25, 0x7a, 0x19, 0x6a, 0x82, 0xab, 0x75, 0x4c, 0x70, 0x55, 0x4c, 0xc8, 0xf3, + 0xf1, 0x62, 0x61, 0x44, 0xbb, 0xb0, 0x18, 0xd2, 0x84, 0x8b, 0x33, 0xe4, 0xca, 0xea, 0xf4, 0xda, + 0x99, 0x1e, 0x96, 0xdf, 0x41, 0xd8, 0xbf, 0x1f, 0xf9, 0x14, 0x9b, 0x95, 0x82, 0xd0, 0x30, 0x08, + 0xbb, 0x01, 0xa7, 0xc3, 0x44, 0xfa, 0xc4, 0xc5, 0xd5, 0x61, 0x10, 0x1e, 0x88, 0xb1, 0x14, 0x92, + 0x53, 0x2d, 0xac, 0x68, 0x21, 0x39, 0x95, 0xc2, 0xd6, 0x7d, 0xb5, 0x33, 0xad, 0x71, 0xb2, 0xf4, + 0x00, 0x2c, 0x74, 0x0e, 0x0e, 0xf7, 0xef, 0xed, 0x35, 0x1c, 0x54, 0x85, 0xf2, 0xbd, 0x83, 0xce, + 0x83, 0x46, 0x09, 0x2d, 0x82, 0xdb, 0xd9, 0x7b, 0xd0, 0x70, 0xc5, 0xc7, 0xfd, 0x9d, 0xa3, 0x46, + 0x59, 0x94, 0xa8, 0x7d, 0xfc, 0xfe, 0xc3, 0xa3, 0x46, 0xa5, 0xf5, 0x93, 0x32, 0xac, 0xed, 0x53, + 0x7e, 0xc4, 0xa2, 0x93, 0xc0, 0xa7, 0x4c, 0xf1, 0xb7, 0x0f, 0xf1, 0xbf, 0x5c, 0xeb, 0x14, 0xdf, + 0x80, 0x6a, 0xac, 0x91, 0xd2, 0x8d, 0xf5, 0xad, 0xb5, 0x99, 0xcd, 0xe3, 0x14, 0x82, 0x28, 0x34, + 0x18, 0x4d, 0xa2, 0x11, 0xeb, 0xd1, 0x6e, 0x22, 0x85, 0x26, 0xa7, 0xb7, 0xad, 0x65, 0x33, 0xe6, + 0xdb, 0xc6, 0x9e, 0xf8, 0x90, 0xab, 0xd5, 0x7c, 0xa2, 0x0e, 0xf8, 0x2a, 0x9b, 0x9c, 0x45, 0x03, + 0xb8, 0xea, 0x13, 0x4e, 0xba, 0x53, 0x96, 0x54, 0xfe, 0xdf, 0x2e, 0x66, 0xe9, 0x0e, 0xe1, 0xa4, + 0x33, 0x6b, 0x6b, 0xcd, 0x9f, 0x9e, 0x47, 0x6f, 0x43, 0xdd, 0x4f, 0x7b, 0x90, 0x08, 0x9e, 0xb0, + 0xf2, 
0x52, 0x6e, 0x87, 0xc2, 0x36, 0xd2, 0x7b, 0x08, 0xd7, 0xf2, 0xf6, 0x93, 0x53, 0x97, 0x36, + 0xec, 0xba, 0x94, 0xeb, 0xe3, 0xac, 0x54, 0x79, 0x8f, 0xe1, 0x7a, 0x3e, 0xf9, 0x4b, 0x2a, 0x6e, + 0xfd, 0xc9, 0x81, 0x97, 0x8e, 0x18, 0x8d, 0x09, 0xa3, 0xc6, 0x6b, 0xbb, 0x51, 0x78, 0x1c, 0xf4, + 0xbd, 0xed, 0x34, 0x3d, 0xd0, 0x4d, 0x58, 0xe8, 0xc9, 0x49, 0x9d, 0x0f, 0xf6, 0xe9, 0xb1, 0xaf, + 0x04, 0x58, 0xc3, 0xbc, 0xef, 0x39, 0x56, 0x3e, 0x7d, 0x15, 0x56, 0x63, 0x65, 0xc1, 0xef, 0x16, + 0x53, 0xb3, 0x62, 0xf0, 0x8a, 0xca, 0x74, 0x34, 0x4a, 0x45, 0xa3, 0xd1, 0xfa, 0x41, 0x09, 0xae, + 0x3d, 0x8c, 0xfb, 0x8c, 0xf8, 0x34, 0x8d, 0x8a, 0x68, 0x26, 0x1e, 0xcb, 0x36, 0x77, 0x66, 0xd9, + 0xb0, 0x8a, 0x78, 0x69, 0xb2, 0x88, 0xbf, 0x09, 0x35, 0x46, 0x9e, 0x77, 0x13, 0xa1, 0x4e, 0xd6, + 0x88, 0xfa, 0xd6, 0xd5, 0x9c, 0xb6, 0x85, 0xab, 0x4c, 0x7f, 0x79, 0xdf, 0xb5, 0x9d, 0xf2, 0x2e, + 0xac, 0x8c, 0x14, 0x31, 0x5f, 0xeb, 0x38, 0xc7, 0x27, 0xcb, 0x06, 0xae, 0xfa, 0xe8, 0x85, 0x5d, + 0xf2, 0x3b, 0x07, 0xbc, 0x47, 0x64, 0x10, 0xf8, 0x82, 0x9c, 0xf6, 0x89, 0xe8, 0x0c, 0x3a, 0xea, + 0x8f, 0x0b, 0x3a, 0x26, 0x4b, 0x89, 0x52, 0xb1, 0x94, 0xd8, 0xb5, 0x36, 0x3f, 0x45, 0xde, 0x29, + 0x4c, 0xfe, 0x37, 0x0e, 0x34, 0x0d, 0xf9, 0xec, 0x3c, 0xfc, 0x5f, 0x50, 0xff, 0xad, 0x03, 0x35, + 0x45, 0x74, 0xc4, 0xa8, 0xd7, 0xcf, 0xb8, 0xbe, 0x0e, 0x6b, 0x9c, 0x32, 0x46, 0x8e, 0x23, 0x36, + 0xec, 0xda, 0x37, 0x86, 0x1a, 0x6e, 0xa4, 0x82, 0x47, 0x3a, 0xeb, 0xfe, 0x37, 0xdc, 0xff, 0xe9, + 0xc0, 0x12, 0xa6, 0xc4, 0x37, 0xf9, 0xe2, 0xf9, 0x05, 0x5d, 0x7d, 0x1b, 0x96, 0x7b, 0x23, 0xc6, + 0xc4, 0x2d, 0x53, 0x25, 0xf9, 0x39, 0xac, 0x97, 0x34, 0x5a, 0x1d, 0x98, 0xb1, 0xc5, 0xfd, 0x8b, + 0x50, 0x0b, 0xe9, 0xf3, 0x62, 0x47, 0xa5, 0x1a, 0xd2, 0xe7, 0x97, 0x3c, 0x25, 0xbf, 0x2e, 0x03, + 0x3a, 0x1a, 0x90, 0xd0, 0xec, 0x78, 0xf7, 0x29, 0x09, 0xfb, 0xd4, 0xfb, 0x8f, 0x53, 0x70, 0xe3, + 0xef, 0x40, 0x3d, 0x66, 0x41, 0xc4, 0x8a, 0x6d, 0x1b, 0x24, 0x56, 0x51, 0xde, 0x03, 0x14, 0xb3, + 0x28, 0x8e, 0x12, 0xea, 0x77, 0xb3, 0x1d, 0xbb, 0x67, 0x2b, 0x68, 0x98, 0x25, 0x87, 0x66, 0xe7, + 0x59, 0xa2, 0x94, 0x0b, 0x25, 0x0a, 0xfa, 0x2c, 0x2c, 0x2b, 0xc6, 0x31, 0x0b, 0x4e, 0x84, 0xc9, + 0x8a, 0xbc, 0xfe, 0x2d, 0xc9, 0xc9, 0x23, 0x35, 0xe7, 0xfd, 0xbc, 0x64, 0x85, 0xe4, 0x36, 0x2c, + 0xc7, 0x03, 0x12, 0x86, 0x45, 0x2b, 0xd8, 0x92, 0x46, 0x2b, 0x82, 0xbb, 0xe2, 0xda, 0x20, 0xef, + 0x87, 0x49, 0x97, 0xd1, 0x78, 0x40, 0x7a, 0x54, 0xc7, 0x67, 0xfe, 0xcb, 0x6c, 0xd5, 0xac, 0xc0, + 0x6a, 0x01, 0xda, 0x80, 0x55, 0x43, 0xc1, 0xd0, 0x76, 0x25, 0xed, 0x15, 0x3d, 0xad, 0x89, 0x5f, + 0xb8, 0x9f, 0xa3, 0x37, 0x00, 0x0d, 0x68, 0x9f, 0xf4, 0xc6, 0xf2, 0xbe, 0xdd, 0x4d, 0xc6, 0x09, + 0xa7, 0x43, 0x7d, 0x89, 0x6d, 0x28, 0x89, 0xa8, 0x9e, 0x1d, 0x39, 0xdf, 0xfa, 0xa3, 0x0b, 0x57, + 0x77, 0xe2, 0x78, 0x30, 0x9e, 0xca, 0x9b, 0x7f, 0x7f, 0xfc, 0x79, 0x33, 0x13, 0x0d, 0xf7, 0x45, + 0xa2, 0xf1, 0xc2, 0xe9, 0x92, 0xe3, 0xf9, 0x4a, 0x9e, 0xe7, 0xbd, 0xdf, 0x3b, 0x97, 0x3e, 0xc5, + 0x4d, 0x58, 0x34, 0x36, 0xd4, 0x9b, 0xc4, 0x0c, 0xa7, 0xc3, 0xea, 0x5e, 0x32, 0xac, 0xe5, 0x39, + 0x61, 0xfd, 0x47, 0x09, 0xae, 0x1e, 0x0c, 0xe3, 0x88, 0xf1, 0xc9, 0x5b, 0xc4, 0x5b, 0x05, 0xa3, + 0xba, 0x02, 0xa5, 0xc0, 0xd7, 0xef, 0xcf, 0x52, 0xe0, 0x7b, 0xa7, 0xd0, 0x50, 0xea, 0x68, 0x5a, + 0x52, 0xcf, 0x7d, 0xbd, 0x14, 0x4a, 0x08, 0x85, 0xb2, 0x1d, 0xe6, 0x4e, 0x38, 0xcc, 0xfb, 0xa5, + 0x1d, 0x8d, 0x0f, 0x00, 0x05, 0x9a, 0x46, 0xd7, 0x5c, 0xb7, 0x4d, 0x5b, 0xb8, 0x69, 0x99, 0xc8, + 0xd9, 0x7a, 0x7b, 0x9a, 0x3f, 0x5e, 0x0b, 0xa6, 0x66, 0x92, 0x8b, 0x57, 0xdf, 0xbf, 0x38, 0xb0, + 0x22, 0xfa, 0x4d, 0xd6, 0xe2, 
0x3f, 0xbe, 0xe6, 0xce, 0x26, 0x5e, 0x3e, 0x95, 0x42, 0xa9, 0xa9, + 0xdd, 0x7c, 0xe1, 0xfd, 0xfd, 0xd4, 0x81, 0x6b, 0xe6, 0x99, 0x22, 0xda, 0x7a, 0xde, 0x93, 0xec, + 0xd4, 0xe2, 0x75, 0x4b, 0x54, 0x85, 0x14, 0x3b, 0xff, 0x51, 0x66, 0xa3, 0x2e, 0xce, 0xee, 0x67, + 0x0e, 0x7c, 0xd2, 0x5c, 0xb2, 0x2c, 0x8a, 0x1f, 0xc1, 0xb3, 0xe0, 0x23, 0xb9, 0x8c, 0xfc, 0xcd, + 0x81, 0xb5, 0x94, 0x56, 0x7a, 0x23, 0x49, 0x2e, 0x4e, 0x0b, 0xbd, 0x0d, 0xd0, 0x8b, 0xc2, 0x90, + 0xf6, 0xb8, 0xb9, 0xe7, 0x9f, 0x55, 0x73, 0x33, 0xa8, 0xf7, 0x2d, 0x6b, 0x3f, 0xd7, 0x61, 0x21, + 0x1a, 0xf1, 0x78, 0xc4, 0x75, 0x4a, 0xea, 0xd1, 0x85, 0xc3, 0xb0, 0xf5, 0xe3, 0x1a, 0x54, 0xcd, + 0x93, 0x0c, 0x7d, 0x13, 0x6a, 0xfb, 0x94, 0xeb, 0x1f, 0xab, 0x3e, 0x77, 0xce, 0x6b, 0x57, 0x25, + 0xd0, 0xe7, 0x0b, 0xbd, 0x89, 0xd1, 0x60, 0xce, 0xfb, 0x0f, 0x6d, 0x5a, 0xeb, 0x73, 0x11, 0xa9, + 0xa5, 0xd7, 0x0a, 0x20, 0xb5, 0xb5, 0xef, 0x9c, 0xf5, 0xf8, 0x40, 0x37, 0x2c, 0x45, 0xf3, 0x61, + 0xa9, 0xdd, 0x76, 0x51, 0xb8, 0x36, 0x3e, 0x9a, 0xff, 0x78, 0x40, 0xaf, 0xe7, 0xe8, 0x9a, 0x06, + 0xa5, 0x86, 0xdf, 0x28, 0x06, 0xd6, 0x66, 0x83, 0xfc, 0x37, 0x28, 0xda, 0xb0, 0xb4, 0xe4, 0x01, + 0x52, 0x73, 0x9b, 0xe7, 0x03, 0xb5, 0xa9, 0xbb, 0xd6, 0x1b, 0x03, 0xbd, 0x62, 0x2d, 0x4b, 0x67, + 0x53, 0xa5, 0xaf, 0xce, 0x91, 0x6a, 0x4d, 0xdf, 0x98, 0xbc, 0xf1, 0xa3, 0x4f, 0xdb, 0x6f, 0x5b, + 0x4b, 0x90, 0xea, 0x5b, 0x9f, 0x0f, 0xd0, 0x2a, 0x7b, 0x79, 0x57, 0x6a, 0x64, 0xa7, 0xe9, 0xac, + 0x38, 0x55, 0xff, 0x85, 0xf3, 0x60, 0xda, 0xc8, 0x71, 0xee, 0x05, 0x0c, 0xd9, 0xcb, 0x73, 0xe4, + 0xa9, 0x99, 0x8d, 0x73, 0x71, 0x99, 0x9d, 0x9c, 0xb6, 0x38, 0x61, 0x27, 0xaf, 0x6d, 0xe6, 0xd9, + 0xc9, 0xc7, 0x69, 0x3b, 0x8f, 0xa7, 0x3b, 0x21, 0xfa, 0xcc, 0x94, 0xa3, 0x33, 0x51, 0xaa, 0xbd, + 0x75, 0x16, 0x44, 0x2b, 0xfe, 0xb2, 0xfa, 0x29, 0x1f, 0x4d, 0xfc, 0x12, 0xca, 0xa3, 0x38, 0x55, + 0xd2, 0x9c, 0x15, 0xa8, 0xa5, 0x5b, 0xdf, 0x77, 0xa1, 0x6e, 0x35, 0x06, 0xf4, 0x81, 0x5d, 0x9c, + 0x36, 0x72, 0xca, 0x8e, 0xdd, 0xe3, 0x72, 0xb3, 0x7a, 0x0e, 0x50, 0x53, 0x3d, 0x3d, 0xa3, 0x1f, + 0xa1, 0xbc, 0xb3, 0x38, 0x83, 0x4a, 0x8d, 0xde, 0x28, 0x88, 0xd6, 0x96, 0x9f, 0xe4, 0xb4, 0x9a, + 0x89, 0xf2, 0x3b, 0x23, 0xcd, 0x2d, 0xbf, 0x79, 0x28, 0x65, 0xe1, 0x4d, 0xe7, 0x12, 0x81, 0x78, + 0xb2, 0x20, 0xff, 0xa3, 0xbb, 0xf5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x16, 0x0b, 0x32, + 0xb6, 0x1b, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto new file mode 100644 index 0000000000..370faf74cd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto @@ -0,0 +1,351 @@ +// Terraform Plugin RPC protocol version 5.0 +// +// This file defines version 5.0 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will be updated in-place in the source Terraform repository for +// any minor versions of protocol 5, but later minor versions will always be +// backwards compatible. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. 
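+//
+// As an illustrative note (the exact command is an assumption, not part of
+// this file): with the protoc-gen-go gRPC plugin contemporary with protocol
+// 5.0, Go stubs for this definition can typically be generated with:
+//
+//     protoc -I . tfplugin5.proto --go_out=plugins=grpc:.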
+// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the master +// branch or any other development branch. +// +syntax = "proto3"; + +package tfplugin5; + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. +message DynamicValue { + bytes msgpack = 1; + bytes json = 2; +} + +message Diagnostic { + enum Severity { + INVALID = 0; + ERROR = 1; + WARNING = 2; + } + Severity severity = 1; + string summary = 2; + string detail = 3; + AttributePath attribute = 4; +} + +message AttributePath { + message Step { + oneof selector { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + string attribute_name = 1; + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. + string element_key_string = 2; + int64 element_key_int = 3; + } + } + repeated Step steps = 1; +} + +message Stop { + message Request { + } + message Response { + string Error = 1; + } +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. +message RawState { + bytes json = 1; + map<string, string> flatmap = 2; +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +message Schema { + message Block { + int64 version = 1; + repeated Attribute attributes = 2; + repeated NestedBlock block_types = 3; + } + + message Attribute { + string name = 1; + bytes type = 2; + string description = 3; + bool required = 4; + bool optional = 5; + bool computed = 6; + bool sensitive = 7; + } + + message NestedBlock { + enum NestingMode { + INVALID = 0; + SINGLE = 1; + LIST = 2; + SET = 3; + MAP = 4; + GROUP = 5; + } + + string type_name = 1; + Block block = 2; + NestingMode nesting = 3; + int64 min_items = 4; + int64 max_items = 5; + } + + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + int64 version = 1; + + // Block is the top level configuration block for this schema. 
+ Block block = 2; +} + +service Provider { + //////// Information about what a provider supports/expects + rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response); + rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response); + rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response); + rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response); + rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response); + + //////// One-time initialization, called before other functions below + rpc Configure(Configure.Request) returns (Configure.Response); + + //////// Managed Resource Lifecycle + rpc ReadResource(ReadResource.Request) returns (ReadResource.Response); + rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response); + rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response); + rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response); + + rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response); + + //////// Graceful Shutdown + rpc Stop(Stop.Request) returns (Stop.Response); +} + +message GetProviderSchema { + message Request { + } + message Response { + Schema provider = 1; + map<string, Schema> resource_schemas = 2; + map<string, Schema> data_source_schemas = 3; + repeated Diagnostic diagnostics = 4; + } +} + +message PrepareProviderConfig { + message Request { + DynamicValue config = 1; + } + message Response { + DynamicValue prepared_config = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message UpgradeResourceState { + message Request { + string type_name = 1; + + // version is the schema_version number recorded in the state file + int64 version = 2; + + // raw_state is the raw state as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. + RawState raw_state = 3; + } + message Response { + // upgraded_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in raw_state. + DynamicValue upgraded_state = 1; + + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. 
+        repeated Diagnostic diagnostics = 2;
+    }
+}
+
+message ValidateResourceTypeConfig {
+    message Request {
+        string type_name = 1;
+        DynamicValue config = 2;
+    }
+    message Response {
+        repeated Diagnostic diagnostics = 1;
+    }
+}
+
+message ValidateDataSourceConfig {
+    message Request {
+        string type_name = 1;
+        DynamicValue config = 2;
+    }
+    message Response {
+        repeated Diagnostic diagnostics = 1;
+    }
+}
+
+message Configure {
+    message Request {
+        string terraform_version = 1;
+        DynamicValue config = 2;
+    }
+    message Response {
+        repeated Diagnostic diagnostics = 1;
+    }
+}
+
+message ReadResource {
+    message Request {
+        string type_name = 1;
+        DynamicValue current_state = 2;
+    }
+    message Response {
+        DynamicValue new_state = 1;
+        repeated Diagnostic diagnostics = 2;
+    }
+}
+
+message PlanResourceChange {
+    message Request {
+        string type_name = 1;
+        DynamicValue prior_state = 2;
+        DynamicValue proposed_new_state = 3;
+        DynamicValue config = 4;
+        bytes prior_private = 5;
+    }
+
+    message Response {
+        DynamicValue planned_state = 1;
+        repeated AttributePath requires_replace = 2;
+        bytes planned_private = 3;
+        repeated Diagnostic diagnostics = 4;
+
+        // This may be set only by the helper/schema "SDK" in the main Terraform
+        // repository, to request that Terraform Core >=0.12 permit additional
+        // inconsistencies that can result from the legacy SDK type system
+        // and its imprecise mapping to the >=0.12 type system.
+        // The change in behavior implied by this flag makes sense only for the
+        // specific details of the legacy SDK type system, and is not a general
+        // mechanism to avoid proper type handling in providers.
+        //
+        // ==== DO NOT USE THIS ====
+        // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
+        // ==== DO NOT USE THIS ====
+        bool legacy_type_system = 5;
+    }
+}
+
+message ApplyResourceChange {
+    message Request {
+        string type_name = 1;
+        DynamicValue prior_state = 2;
+        DynamicValue planned_state = 3;
+        DynamicValue config = 4;
+        bytes planned_private = 5;
+    }
+    message Response {
+        DynamicValue new_state = 1;
+        bytes private = 2;
+        repeated Diagnostic diagnostics = 3;
+
+        // This may be set only by the helper/schema "SDK" in the main Terraform
+        // repository, to request that Terraform Core >=0.12 permit additional
+        // inconsistencies that can result from the legacy SDK type system
+        // and its imprecise mapping to the >=0.12 type system.
+        // The change in behavior implied by this flag makes sense only for the
+        // specific details of the legacy SDK type system, and is not a general
+        // mechanism to avoid proper type handling in providers.
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 4; + } +} + +message ImportResourceState { + message Request { + string type_name = 1; + string id = 2; + } + + message ImportedResource { + string type_name = 1; + DynamicValue state = 2; + bytes private = 3; + } + + message Response { + repeated ImportedResource imported_resources = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message ReadDataSource { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + DynamicValue state = 1; + repeated Diagnostic diagnostics = 2; + } +} + +service Provisioner { + rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response); + rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response); + rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response); + rpc Stop(Stop.Request) returns (Stop.Response); +} + +message GetProvisionerSchema { + message Request { + } + message Response { + Schema provisioner = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message ValidateProvisionerConfig { + message Request { + DynamicValue config = 1; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ProvisionResource { + message Request { + DynamicValue config = 1; + DynamicValue connection = 2; + } + message Response { + string output = 1; + repeated Diagnostic diagnostics = 2; + } +} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go new file mode 100644 index 0000000000..8f89909c6f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go @@ -0,0 +1,5 @@ +// Package blocktoattr includes some helper functions that can perform +// preprocessing on a HCL body where a configschema.Block schema is available +// in order to allow list and set attributes defined in the schema to be +// optionally written by the user as block syntax. +package blocktoattr diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go new file mode 100644 index 0000000000..d8c2e7752b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go @@ -0,0 +1,187 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization +// functionality to allow attributes that are specified as having list or set +// type in the schema to be written with HCL block syntax as multiple nested +// blocks with the attribute name as the block type. +// +// This partially restores some of the block/attribute confusion from HCL 1 +// so that existing patterns that depended on that confusion can continue to +// be used in the short term while we settle on a longer-term strategy. +// +// Most of the fixup work is actually done when the returned body is +// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual +// decode of the body might not, if the content of the body is so ambiguous +// that there's no safe way to map it to the schema. 
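+//
+// As an illustrative sketch (the attribute name here is hypothetical): given
+// a schema attribute "rule" with type list(object({port=number})), the fixup
+// lets both of these configuration forms decode to the same value:
+//
+//     rule { port = 80 }       # nested block syntax, rewritten by the fixup
+//     rule = [{ port = 80 }]   # ordinary attribute syntax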
+func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body {
+    // The schema should never be nil, but in practice it seems to be sometimes
+    // in the presence of poorly-configured test mocks, so we'll be robust
+    // by synthesizing an empty one.
+    if schema == nil {
+        schema = &configschema.Block{}
+    }
+
+    return &fixupBody{
+        original: body,
+        schema:   schema,
+        names:    ambiguousNames(schema),
+    }
+}
+
+type fixupBody struct {
+    original hcl.Body
+    schema   *configschema.Block
+    names    map[string]struct{}
+}
+
+// Content decodes content from the body. The given schema must be the lower-level
+// representation of the same schema that was previously passed to FixUpBlockAttrs,
+// or else the result is undefined.
+func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+    schema = b.effectiveSchema(schema)
+    content, diags := b.original.Content(schema)
+    return b.fixupContent(content), diags
+}
+
+func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
+    schema = b.effectiveSchema(schema)
+    content, remain, diags := b.original.PartialContent(schema)
+    remain = &fixupBody{
+        original: remain,
+        schema:   b.schema,
+        names:    b.names,
+    }
+    return b.fixupContent(content), remain, diags
+}
+
+func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
+    // FixUpBlockAttrs is not intended to be used in situations where we'd use
+    // JustAttributes, so we just pass this through verbatim to complete our
+    // implementation of hcl.Body.
+    return b.original.JustAttributes()
+}
+
+func (b *fixupBody) MissingItemRange() hcl.Range {
+    return b.original.MissingItemRange()
+}
+
+// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's
+// content to determine whether the author has used attribute or block syntax
+// for each of the ambiguous attributes where both are permitted.
+//
+// The resulting schema will always contain all of the same names that are
+// in the given schema, but some attribute schemas may instead be replaced by
+// block header schemas.
+func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema {
+    return effectiveSchema(given, b.original, b.names, true)
+}
+
+func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent {
+    var ret hcl.BodyContent
+    ret.Attributes = make(hcl.Attributes)
+    for name, attr := range content.Attributes {
+        ret.Attributes[name] = attr
+    }
+    blockAttrVals := make(map[string][]*hcl.Block)
+    for _, block := range content.Blocks {
+        if _, exists := b.names[block.Type]; exists {
+            // If we get here then we've found a block type whose instances need
+            // to be re-interpreted as a list-of-objects attribute. We'll gather
+            // those up and fix them up below.
+            blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block)
+            continue
+        }
+
+        // We need to now re-wrap our inner body so it will be subject to the
+        // same attribute-as-block fixup when recursively decoded.
+        retBlock := *block // shallow copy
+        if blockS, ok := b.schema.BlockTypes[block.Type]; ok {
+            // Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then
+            retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block)
+        }
+
+        ret.Blocks = append(ret.Blocks, &retBlock)
+    }
+    // Now we'll install synthetic attributes for each of our fixups. We can't
+    // do this exactly because HCL's information model expects an attribute
+    // to be a single decl but we have multiple separate blocks.
We'll + // approximate things, then, by using only our first block for the source + // location information. (We are guaranteed at least one by the above logic.) + for name, blocks := range blockAttrVals { + ret.Attributes[name] = &hcl.Attribute{ + Name: name, + Expr: &fixupBlocksExpr{ + blocks: blocks, + ety: b.schema.Attributes[name].Type.ElementType(), + }, + + Range: blocks[0].DefRange, + NameRange: blocks[0].TypeRange, + } + } + return &ret +} + +type fixupBlocksExpr struct { + blocks hcl.Blocks + ety cty.Type +} + +func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + // In order to produce a suitable value for our expression we need to + // now decode the whole descendent block structure under each of our block + // bodies. + // + // That requires us to do something rather strange: we must construct a + // synthetic block type schema derived from the element type of the + // attribute, thus inverting our usual direction of lowering a schema + // into an implied type. Because a type is less detailed than a schema, + // the result is imprecise and in particular will just consider all + // the attributes to be optional and let the provider eventually decide + // whether to return errors if they turn out to be null when required. + schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety + spec := schema.DecoderSpec() + + vals := make([]cty.Value, len(e.blocks)) + var diags hcl.Diagnostics + for i, block := range e.blocks { + body := FixUpBlockAttrs(block.Body, schema) + val, blockDiags := hcldec.Decode(body, spec, ctx) + diags = append(diags, blockDiags...) + if val == cty.NilVal { + val = cty.UnknownVal(e.ety) + } + vals[i] = val + } + if len(vals) == 0 { + return cty.ListValEmpty(e.ety), diags + } + return cty.ListVal(vals), diags +} + +func (e *fixupBlocksExpr) Variables() []hcl.Traversal { + var ret []hcl.Traversal + schema := SchemaForCtyElementType(e.ety) + spec := schema.DecoderSpec() + for _, block := range e.blocks { + ret = append(ret, hcldec.Variables(block.Body, spec)...) + } + return ret +} + +func (e *fixupBlocksExpr) Range() hcl.Range { + // This is not really an appropriate range for the expression but it's + // the best we can do from here. 
+ return e.blocks[0].DefRange +} + +func (e *fixupBlocksExpr) StartRange() hcl.Range { + return e.blocks[0].DefRange +} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go new file mode 100644 index 0000000000..2f2463a5cd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go @@ -0,0 +1,145 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func ambiguousNames(schema *configschema.Block) map[string]struct{} { + if schema == nil { + return nil + } + ambiguousNames := make(map[string]struct{}) + for name, attrS := range schema.Attributes { + aty := attrS.Type + if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { + ambiguousNames[name] = struct{}{} + } + } + return ambiguousNames +} + +func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { + ret := &hcl.BodySchema{} + + appearsAsBlock := make(map[string]struct{}) + { + // We'll construct some throwaway schemas here just to probe for + // whether each of our ambiguous names seems to be being used as + // an attribute or a block. We need to check both because in JSON + // syntax we rely on the schema to decide between attribute or block + // interpretation and so JSON will always answer yes to both of + // these questions and we want to prefer the attribute interpretation + // in that case. + var probeSchema hcl.BodySchema + + for name := range ambiguousNames { + probeSchema = hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: name, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + if _, exists := content.Attributes[name]; exists { + // Can decode as an attribute, so we'll go with that. + continue + } + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: name, + }, + }, + } + content, _, _ = body.PartialContent(&probeSchema) + if len(content.Blocks) > 0 { + // No attribute present and at least one block present, so + // we'll need to rewrite this one as a block for a successful + // result. + appearsAsBlock[name] = struct{}{} + } + } + if !dynamicExpanded { + // If we're deciding for a context where dynamic blocks haven't + // been expanded yet then we need to probe for those too. + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "dynamic", + LabelNames: []string{"type"}, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + for _, block := range content.Blocks { + if _, exists := ambiguousNames[block.Labels[0]]; exists { + appearsAsBlock[block.Labels[0]] = struct{}{} + } + } + } + } + + for _, attrS := range given.Attributes { + if _, exists := appearsAsBlock[attrS.Name]; exists { + ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ + Type: attrS.Name, + }) + } else { + ret.Attributes = append(ret.Attributes, attrS) + } + } + + // Anything that is specified as a block type in the input schema remains + // that way by just passing through verbatim. + ret.Blocks = append(ret.Blocks, given.Blocks...) + + return ret +} + +// SchemaForCtyElementType converts a cty object type into an +// approximately-equivalent configschema.Block representing the element of +// a list or set. If the given type is not an object type then this +// function will panic. 
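+//
+// For example (illustrative only): cty.Object(map[string]cty.Type{"port": cty.Number})
+// produces a Block whose single attribute "port" has type cty.Number and is
+// marked Optional, since a type alone cannot say which attributes are required.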
+func SchemaForCtyElementType(ty cty.Type) *configschema.Block { + atys := ty.AttributeTypes() + ret := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute, len(atys)), + } + for name, aty := range atys { + ret.Attributes[name] = &configschema.Attribute{ + Type: aty, + Optional: true, + } + } + return ret +} + +// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type +// into an approximately-equivalent configschema.NestedBlock. If the given type +// is not of the expected kind then this function will panic. +func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch { + case ty.IsListType(): + nesting = configschema.NestingList + case ty.IsSetType(): + nesting = configschema.NestingSet + default: + panic("unsuitable type") + } + nested := SchemaForCtyElementType(ty.ElementType()) + return &configschema.NestedBlock{ + Nesting: nesting, + Block: *nested, + } +} + +// TypeCanBeBlocks returns true if the given type is a list-of-object or +// set-of-object type, and would thus be subject to the blocktoattr fixup +// if used as an attribute type. +func TypeCanBeBlocks(ty cty.Type) bool { + return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType() +} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go new file mode 100644 index 0000000000..e123b8aab7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go @@ -0,0 +1,43 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl2/ext/dynblock" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform/configs/configschema" +) + +// ExpandedVariables finds all of the global variables referenced in the +// given body with the given schema while taking into account the possibilities +// both of "dynamic" blocks being expanded and the possibility of certain +// attributes being written instead as nested blocks as allowed by the +// FixUpBlockAttrs function. +// +// This function exists to allow variables to be analyzed prior to dynamic +// block expansion while also dealing with the fact that dynamic block expansion +// might in turn produce nested blocks that are subject to FixUpBlockAttrs. +// +// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, +// which is itself a drop-in replacement for hcldec.Variables. +func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { + rootNode := dynblock.WalkVariables(body) + return walkVariables(rootNode, body, schema) +} + +func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { + givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) + ambiguousNames := ambiguousNames(schema) + effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) + vars, children := node.Visit(effectiveRawSchema) + + for _, child := range children { + if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { + vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) + } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists { + synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) + vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) 
+ } + } + + return vars +} diff --git a/vendor/github.com/hashicorp/terraform/lang/data.go b/vendor/github.com/hashicorp/terraform/lang/data.go new file mode 100644 index 0000000000..80313d6c0b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/data.go @@ -0,0 +1,33 @@ +package lang + +import ( + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Data is an interface whose implementations can provide cty.Value +// representations of objects identified by referenceable addresses from +// the addrs package. +// +// This interface will grow each time a new type of reference is added, and so +// implementations outside of the Terraform codebases are not advised. +// +// Each method returns a suitable value and optionally some diagnostics. If the +// returned diagnostics contains errors then the type of the returned value is +// used to construct an unknown value of the same type which is then used in +// place of the requested object so that type checking can still proceed. In +// cases where it's not possible to even determine a suitable result type, +// cty.DynamicVal is returned along with errors describing the problem. +type Data interface { + StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics + + GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetResourceInstance(addrs.ResourceInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) +} diff --git a/vendor/github.com/hashicorp/terraform/lang/doc.go b/vendor/github.com/hashicorp/terraform/lang/doc.go new file mode 100644 index 0000000000..af5c5cac0d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/doc.go @@ -0,0 +1,5 @@ +// Package lang deals with the runtime aspects of Terraform's configuration +// language, with concerns such as expression evaluation. It is closely related +// to sibling package "configs", which is responsible for configuration +// parsing and static validation. +package lang diff --git a/vendor/github.com/hashicorp/terraform/lang/eval.go b/vendor/github.com/hashicorp/terraform/lang/eval.go new file mode 100644 index 0000000000..a3fb363ec7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/eval.go @@ -0,0 +1,477 @@ +package lang + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/hcl2/ext/dynblock" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/blocktoattr" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// ExpandBlock expands any "dynamic" blocks present in the given body. The +// result is a body with those blocks expanded, ready to be evaluated with +// EvalBlock. 
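+//
+// A typical call sequence, sketched here with hypothetical variable names:
+//
+//     expanded, expandDiags := scope.ExpandBlock(body, schema)
+//     val, evalDiags := scope.EvalBlock(expanded, schema)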
+// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. +func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + traversals := dynblock.ExpandVariablesHCLDec(body, spec) + refs, diags := References(traversals) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + + return dynblock.Expand(body, ctx), diags +} + +// EvalBlock evaluates the given body using the given block schema and returns +// a cty object value representing its contents. The type of the result conforms +// to the implied type of the given schema. +// +// This function does not automatically expand "dynamic" blocks within the +// body. If that is desired, first call the ExpandBlock method to obtain +// an expanded body to pass to this method. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. +func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + refs, diags := ReferencesInBlock(body, schema) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. + return cty.UnknownVal(schema.ImpliedType()), diags + } + + // HACK: In order to remain compatible with some assumptions made in + // Terraform v0.11 and earlier about the approximate equivalence of + // attribute vs. block syntax, we do a just-in-time fixup here to allow + // any attribute in the schema that has a list-of-objects or set-of-objects + // kind to potentially be populated instead by one or more nested blocks + // whose type is the attribute name. + body = blocktoattr.FixUpBlockAttrs(body, schema) + + val, evalDiags := hcldec.Decode(body, spec, ctx) + diags = diags.Append(evalDiags) + + return val, diags +} + +// EvalExpr evaluates a single expression in the receiving context and returns +// the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + refs, diags := ReferencesInExpr(expr) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. 
+ return cty.UnknownVal(wantType), diags + } + + val, evalDiags := expr.Value(ctx) + diags = diags.Append(evalDiags) + + if wantType != cty.DynamicPseudoType { + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: expr.Range().Ptr(), + }) + } + } + + return val, diags +} + +// EvalReference evaluates the given reference in the receiving scope and +// returns the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // We cheat a bit here and just build an EvalContext for our requested + // reference with the "self" address overridden, and then pull the "self" + // result out of it to return. + ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject) + diags = diags.Append(ctxDiags) + val := ctx.Variables["self"] + if val == cty.NilVal { + val = cty.DynamicVal + } + + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + + return val, diags +} + +// EvalContext constructs a HCL expression evaluation context whose variable +// scope contains sufficient values to satisfy the given set of references. +// +// Most callers should prefer to use the evaluation helper methods that +// this type offers, but this is here for less common situations where the +// caller will handle the evaluation calls itself. +func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { + return s.evalContext(refs, s.SelfAddr) +} + +func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) { + if s == nil { + panic("attempt to construct EvalContext for nil Scope") + } + + var diags tfdiags.Diagnostics + vals := make(map[string]cty.Value) + funcs := s.Functions() + ctx := &hcl.EvalContext{ + Variables: vals, + Functions: funcs, + } + + if len(refs) == 0 { + // Easy path for common case where there are no references at all. + return ctx, diags + } + + // First we'll do static validation of the references. This catches things + // early that might otherwise not get caught due to unknown values being + // present in the scope during planning. 
+ if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() { + diags = diags.Append(staticDiags) + return ctx, diags + } + + // The reference set we are given has not been de-duped, and so there can + // be redundant requests in it for two reasons: + // - The same item is referenced multiple times + // - Both an item and that item's container are separately referenced. + // We will still visit every reference here and ask our data source for + // it, since that allows us to gather a full set of any errors and + // warnings, but once we've gathered all the data we'll then skip anything + // that's redundant in the process of populating our values map. + dataResources := map[string]map[string]map[addrs.InstanceKey]cty.Value{} + managedResources := map[string]map[string]map[addrs.InstanceKey]cty.Value{} + wholeModules := map[string]map[addrs.InstanceKey]cty.Value{} + moduleOutputs := map[string]map[addrs.InstanceKey]map[string]cty.Value{} + inputVariables := map[string]cty.Value{} + localValues := map[string]cty.Value{} + pathAttrs := map[string]cty.Value{} + terraformAttrs := map[string]cty.Value{} + countAttrs := map[string]cty.Value{} + var self cty.Value + + for _, ref := range refs { + rng := ref.SourceRange + isSelf := false + + rawSubj := ref.Subject + if rawSubj == addrs.Self { + if selfAddr == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + continue + } + + // Treat "self" as an alias for the configured self address. + rawSubj = selfAddr + isSelf = true + + if rawSubj == addrs.Self { + // Programming error: the self address cannot alias itself. + panic("scope SelfAddr attempting to alias itself") + } + } + + // This type switch must cover all of the "Referenceable" implementations + // in package addrs. 
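+        // For example (illustrative only): a reference like aws_instance.foo[0]
+        // arrives here as an addrs.ResourceInstance and is stored at
+        // managedResources["aws_instance"]["foo"][addrs.IntKey(0)], which
+        // buildResourceObjects later folds into vals["aws_instance"].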
+ switch subj := rawSubj.(type) { + + case addrs.ResourceInstance: + var into map[string]map[string]map[addrs.InstanceKey]cty.Value + switch subj.Resource.Mode { + case addrs.ManagedResourceMode: + into = managedResources + case addrs.DataResourceMode: + into = dataResources + default: + panic(fmt.Errorf("unsupported ResourceMode %s", subj.Resource.Mode)) + } + + val, valDiags := normalizeRefValue(s.Data.GetResourceInstance(subj, rng)) + diags = diags.Append(valDiags) + + r := subj.Resource + if into[r.Type] == nil { + into[r.Type] = make(map[string]map[addrs.InstanceKey]cty.Value) + } + if into[r.Type][r.Name] == nil { + into[r.Type][r.Name] = make(map[addrs.InstanceKey]cty.Value) + } + into[r.Type][r.Name][subj.Key] = val + if isSelf { + self = val + } + + case addrs.ModuleCallInstance: + val, valDiags := normalizeRefValue(s.Data.GetModuleInstance(subj, rng)) + diags = diags.Append(valDiags) + + if wholeModules[subj.Call.Name] == nil { + wholeModules[subj.Call.Name] = make(map[addrs.InstanceKey]cty.Value) + } + wholeModules[subj.Call.Name][subj.Key] = val + if isSelf { + self = val + } + + case addrs.ModuleCallOutput: + val, valDiags := normalizeRefValue(s.Data.GetModuleInstanceOutput(subj, rng)) + diags = diags.Append(valDiags) + + callName := subj.Call.Call.Name + callKey := subj.Call.Key + if moduleOutputs[callName] == nil { + moduleOutputs[callName] = make(map[addrs.InstanceKey]map[string]cty.Value) + } + if moduleOutputs[callName][callKey] == nil { + moduleOutputs[callName][callKey] = make(map[string]cty.Value) + } + moduleOutputs[callName][callKey][subj.Name] = val + if isSelf { + self = val + } + + case addrs.InputVariable: + val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng)) + diags = diags.Append(valDiags) + inputVariables[subj.Name] = val + if isSelf { + self = val + } + + case addrs.LocalValue: + val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng)) + diags = diags.Append(valDiags) + localValues[subj.Name] = val + if isSelf { + self = val + } + + case addrs.PathAttr: + val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng)) + diags = diags.Append(valDiags) + pathAttrs[subj.Name] = val + if isSelf { + self = val + } + + case addrs.TerraformAttr: + val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng)) + diags = diags.Append(valDiags) + terraformAttrs[subj.Name] = val + if isSelf { + self = val + } + + case addrs.CountAttr: + val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng)) + diags = diags.Append(valDiags) + countAttrs[subj.Name] = val + if isSelf { + self = val + } + + default: + // Should never happen + panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj)) + } + } + + for k, v := range buildResourceObjects(managedResources) { + vals[k] = v + } + vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources)) + vals["module"] = cty.ObjectVal(buildModuleObjects(wholeModules, moduleOutputs)) + vals["var"] = cty.ObjectVal(inputVariables) + vals["local"] = cty.ObjectVal(localValues) + vals["path"] = cty.ObjectVal(pathAttrs) + vals["terraform"] = cty.ObjectVal(terraformAttrs) + vals["count"] = cty.ObjectVal(countAttrs) + if self != cty.NilVal { + vals["self"] = self + } + + return ctx, diags +} + +func buildResourceObjects(resources map[string]map[string]map[addrs.InstanceKey]cty.Value) map[string]cty.Value { + vals := make(map[string]cty.Value) + for typeName, names := range resources { + nameVals := make(map[string]cty.Value) + for name, keys := range names { + 
nameVals[name] = buildInstanceObjects(keys)
+        }
+        vals[typeName] = cty.ObjectVal(nameVals)
+    }
+    return vals
+}
+
+func buildModuleObjects(wholeModules map[string]map[addrs.InstanceKey]cty.Value, moduleOutputs map[string]map[addrs.InstanceKey]map[string]cty.Value) map[string]cty.Value {
+    vals := make(map[string]cty.Value)
+
+    for name, keys := range wholeModules {
+        vals[name] = buildInstanceObjects(keys)
+    }
+
+    for name, keys := range moduleOutputs {
+        if _, exists := wholeModules[name]; exists {
+            // If we also have a whole module value for this name then we'll
+            // skip this since the individual outputs are embedded in that result.
+            continue
+        }
+
+        // The shape of this collection isn't compatible with buildInstanceObjects,
+        // but rather than replicating most of the buildInstanceObjects logic
+        // here we'll instead first transform the structure to be what that
+        // function expects and then use it. This is a little wasteful, but
+        // we do not expect these maps to be large and so the extra work
+        // here should not hurt too much.
+        flattened := make(map[addrs.InstanceKey]cty.Value, len(keys))
+        for k, vals := range keys {
+            flattened[k] = cty.ObjectVal(vals)
+        }
+        vals[name] = buildInstanceObjects(flattened)
+    }
+
+    return vals
+}
+
+func buildInstanceObjects(keys map[addrs.InstanceKey]cty.Value) cty.Value {
+    if val, exists := keys[addrs.NoKey]; exists {
+        // If present, a "no key" value supersedes all other values,
+        // since they should be embedded inside it.
+        return val
+    }
+
+    // If we only have individual values then we need to construct
+    // either a list or a map, depending on what sort of keys we
+    // have.
+    haveInt := false
+    haveString := false
+    maxInt := 0
+
+    for k := range keys {
+        switch tk := k.(type) {
+        case addrs.IntKey:
+            haveInt = true
+            if int(tk) > maxInt {
+                maxInt = int(tk)
+            }
+        case addrs.StringKey:
+            haveString = true
+        }
+    }
+
+    // We should either have ints or strings and not both, but
+    // if we have both then we'll prefer strings and let the
+    // language interpreter try to convert the int keys into
+    // strings in a map.
+    switch {
+    case haveString:
+        vals := make(map[string]cty.Value)
+        for k, v := range keys {
+            switch tk := k.(type) {
+            case addrs.StringKey:
+                vals[string(tk)] = v
+            case addrs.IntKey:
+                sk := strconv.Itoa(int(tk))
+                vals[sk] = v
+            }
+        }
+        return cty.ObjectVal(vals)
+    case haveInt:
+        // We'll make a tuple that is long enough for our maximum
+        // index value. It doesn't matter if we end up shorter than
+        // the number of instances because if length(...) were
+        // being evaluated we would've got a NoKey reference and
+        // thus not ended up in this codepath at all.
+        vals := make([]cty.Value, maxInt+1)
+        for i := range vals {
+            if v, exists := keys[addrs.IntKey(i)]; exists {
+                vals[i] = v
+            } else {
+                // Just a placeholder, since nothing will access this anyway
+                vals[i] = cty.DynamicVal
+            }
+        }
+        return cty.TupleVal(vals)
+    default:
+        // Should never happen because there are no other key types.
+        log.Printf("[ERROR] strange buildInstanceObjects call with no supported key types")
+        return cty.EmptyObjectVal
+    }
+}
+
+func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) {
+    if diags.HasErrors() {
+        // If there are errors then we will force an unknown result so that
+        // we can still evaluate and catch type errors but we'll avoid
+        // producing redundant re-statements of the same errors we've already
+        // dealt with here.
+        return cty.UnknownVal(val.Type()), diags
+    }
+    return val, diags
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go b/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go
new file mode 100644
index 0000000000..6ce8aa9fa2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go
@@ -0,0 +1,129 @@
+package funcs
+
+import (
+    "fmt"
+    "net"
+
+    "github.com/apparentlymart/go-cidr/cidr"
+    "github.com/zclconf/go-cty/cty"
+    "github.com/zclconf/go-cty/cty/function"
+    "github.com/zclconf/go-cty/cty/gocty"
+)
+
+// CidrHostFunc constructs a function that calculates a full host IP address
+// within a given IP network address prefix.
+var CidrHostFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name: "prefix",
+            Type: cty.String,
+        },
+        {
+            Name: "hostnum",
+            Type: cty.Number,
+        },
+    },
+    Type: function.StaticReturnType(cty.String),
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        var hostNum int
+        if err := gocty.FromCtyValue(args[1], &hostNum); err != nil {
+            return cty.UnknownVal(cty.String), err
+        }
+        _, network, err := net.ParseCIDR(args[0].AsString())
+        if err != nil {
+            return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
+        }
+
+        ip, err := cidr.Host(network, hostNum)
+        if err != nil {
+            return cty.UnknownVal(cty.String), err
+        }
+
+        return cty.StringVal(ip.String()), nil
+    },
+})
+
+// CidrNetmaskFunc constructs a function that converts an IPv4 address prefix given
+// in CIDR notation into a subnet mask address.
+var CidrNetmaskFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name: "prefix",
+            Type: cty.String,
+        },
+    },
+    Type: function.StaticReturnType(cty.String),
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        _, network, err := net.ParseCIDR(args[0].AsString())
+        if err != nil {
+            return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
+        }
+
+        return cty.StringVal(net.IP(network.Mask).String()), nil
+    },
+})
+
+// CidrSubnetFunc constructs a function that calculates a subnet address within
+// a given IP network address prefix.
+var CidrSubnetFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name: "prefix",
+            Type: cty.String,
+        },
+        {
+            Name: "newbits",
+            Type: cty.Number,
+        },
+        {
+            Name: "netnum",
+            Type: cty.Number,
+        },
+    },
+    Type: function.StaticReturnType(cty.String),
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        var newbits int
+        if err := gocty.FromCtyValue(args[1], &newbits); err != nil {
+            return cty.UnknownVal(cty.String), err
+        }
+        var netnum int
+        if err := gocty.FromCtyValue(args[2], &netnum); err != nil {
+            return cty.UnknownVal(cty.String), err
+        }
+
+        _, network, err := net.ParseCIDR(args[0].AsString())
+        if err != nil {
+            return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
+        }
+
+        // For portability with 32-bit systems where the subnet number
+        // will be a 32-bit int, we only allow extension of 32 bits in
+        // one call even if we're running on a 64-bit machine.
+        // (Of course, this is significant only for IPv6.)
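+        // For example (illustrative only): cidrsubnet("10.0.0.0/8", 8, 2)
+        // extends the prefix to /16 and selects subnet number 2, giving
+        // "10.2.0.0/16".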
+ if newbits > 32 { + return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits") + } + + newNetwork, err := cidr.Subnet(network, newbits, netnum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(newNetwork.String()), nil + }, +}) + +// CidrHost calculates a full host IP address within a given IP network address prefix. +func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { + return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) +} + +// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. +func CidrNetmask(prefix cty.Value) (cty.Value, error) { + return CidrNetmaskFunc.Call([]cty.Value{prefix}) +} + +// CidrSubnet calculates a subnet address within a given IP network address prefix. +func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { + return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) +} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go new file mode 100644 index 0000000000..f85bfae2f5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go @@ -0,0 +1,1418 @@ +package funcs + +import ( + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + "github.com/zclconf/go-cty/cty/gocty" +) + +var ElementFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "index", + Type: cty.Number, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + list := args[0] + listTy := list.Type() + switch { + case listTy.IsListType(): + return listTy.ElementType(), nil + case listTy.IsTupleType(): + if !args[1].IsKnown() { + // If the index isn't known yet then we can't predict the + // result type since each tuple element can have its own type. + return cty.DynamicPseudoType, nil + } + + etys := listTy.TupleElementTypes() + var index int + err := gocty.FromCtyValue(args[1], &index) + if err != nil { + // e.g. fractional number where whole number is required + return cty.DynamicPseudoType, fmt.Errorf("invalid index: %s", err) + } + if len(etys) == 0 { + return cty.DynamicPseudoType, fmt.Errorf("cannot use element function with an empty list") + } + index = index % len(etys) + return etys[index], nil + default: + return cty.DynamicPseudoType, fmt.Errorf("cannot read elements from %s", listTy.FriendlyName()) + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + var index int + err := gocty.FromCtyValue(args[1], &index) + if err != nil { + // can't happen because we checked this in the Type function above + return cty.DynamicVal, fmt.Errorf("invalid index: %s", err) + } + + if !args[0].IsKnown() { + return cty.UnknownVal(retType), nil + } + + l := args[0].LengthInt() + if l == 0 { + return cty.DynamicVal, fmt.Errorf("cannot use element function with an empty list") + } + index = index % l + + // We did all the necessary type checks in the type function above, + // so this is guaranteed not to fail. 
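+        // For example (illustrative only): element(["a", "b", "c"], 4)
+        // returns "b", because the index wraps around: 4 % 3 == 1.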
+        return args[0].Index(cty.NumberIntVal(int64(index))), nil
+    },
+})
+
+var LengthFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name:             "value",
+            Type:             cty.DynamicPseudoType,
+            AllowDynamicType: true,
+            AllowUnknown:     true,
+        },
+    },
+    Type: func(args []cty.Value) (cty.Type, error) {
+        collTy := args[0].Type()
+        switch {
+        case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType:
+            return cty.Number, nil
+        default:
+            return cty.Number, fmt.Errorf("argument must be a string, a collection type, or a structural type")
+        }
+    },
+    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+        coll := args[0]
+        collTy := args[0].Type()
+        switch {
+        case collTy == cty.DynamicPseudoType:
+            return cty.UnknownVal(cty.Number), nil
+        case collTy.IsTupleType():
+            l := len(collTy.TupleElementTypes())
+            return cty.NumberIntVal(int64(l)), nil
+        case collTy.IsObjectType():
+            l := len(collTy.AttributeTypes())
+            return cty.NumberIntVal(int64(l)), nil
+        case collTy == cty.String:
+            // We'll delegate to the cty stdlib strlen function here, because
+            // it deals with all of the complexities of tokenizing unicode
+            // grapheme clusters.
+            return stdlib.Strlen(coll)
+        case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType():
+            return coll.Length(), nil
+        default:
+            // Should never happen, because of the checks in our Type func above
+            return cty.UnknownVal(cty.Number), fmt.Errorf("impossible value type for length(...)")
+        }
+    },
+})
+
+// CoalesceFunc constructs a function that takes any number of arguments and
+// returns the first one that isn't empty. This function was copied from go-cty
+// stdlib and modified so that it returns the first *non-empty* non-null element
+// from a sequence, instead of merely the first non-null.
+var CoalesceFunc = function.New(&function.Spec{
+    Params: []function.Parameter{},
+    VarParam: &function.Parameter{
+        Name:             "vals",
+        Type:             cty.DynamicPseudoType,
+        AllowUnknown:     true,
+        AllowDynamicType: true,
+        AllowNull:        true,
+    },
+    Type: func(args []cty.Value) (ret cty.Type, err error) {
+        argTypes := make([]cty.Type, len(args))
+        for i, val := range args {
+            argTypes[i] = val.Type()
+        }
+        retType, _ := convert.UnifyUnsafe(argTypes)
+        if retType == cty.NilType {
+            return cty.NilType, fmt.Errorf("all arguments must have the same type")
+        }
+        return retType, nil
+    },
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        for _, argVal := range args {
+            // We already know this will succeed because of the checks in our Type func above
+            argVal, _ = convert.Convert(argVal, retType)
+            if !argVal.IsKnown() {
+                return cty.UnknownVal(retType), nil
+            }
+            if argVal.IsNull() {
+                continue
+            }
+            if retType == cty.String && argVal.RawEquals(cty.StringVal("")) {
+                continue
+            }
+
+            return argVal, nil
+        }
+        return cty.NilVal, fmt.Errorf("no non-null, non-empty-string arguments")
+    },
+})
+
+// CoalesceListFunc constructs a function that takes any number of list arguments
+// and returns the first one that isn't empty.
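+//
+// For example (illustrative only): coalescelist([], ["a"], ["b"]) returns ["a"].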
+var CoalesceListFunc = function.New(&function.Spec{
+    Params: []function.Parameter{},
+    VarParam: &function.Parameter{
+        Name:             "vals",
+        Type:             cty.List(cty.DynamicPseudoType),
+        AllowUnknown:     true,
+        AllowDynamicType: true,
+        AllowNull:        true,
+    },
+    Type: func(args []cty.Value) (ret cty.Type, err error) {
+        if len(args) == 0 {
+            return cty.NilType, fmt.Errorf("at least one argument is required")
+        }
+
+        argTypes := make([]cty.Type, len(args))
+
+        for i, arg := range args {
+            argTypes[i] = arg.Type()
+        }
+
+        retType, _ := convert.UnifyUnsafe(argTypes)
+        if retType == cty.NilType {
+            return cty.NilType, fmt.Errorf("all arguments must have the same type")
+        }
+
+        return retType, nil
+    },
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+
+        vals := make([]cty.Value, 0, len(args))
+        for _, arg := range args {
+            if !arg.IsKnown() {
+                // If we run into an unknown list at some point, we can't
+                // predict the final result yet. (If there's a known, non-empty
+                // arg before this then we won't get here.)
+                return cty.UnknownVal(retType), nil
+            }
+
+            // We already know this will succeed because of the checks in our Type func above
+            arg, _ = convert.Convert(arg, retType)
+
+            it := arg.ElementIterator()
+            for it.Next() {
+                _, v := it.Element()
+                vals = append(vals, v)
+            }
+
+            if len(vals) > 0 {
+                return cty.ListVal(vals), nil
+            }
+        }
+
+        return cty.NilVal, fmt.Errorf("no non-null arguments")
+    },
+})
+
+// CompactFunc constructs a function that takes a list of strings and returns a new list
+// with any empty string elements removed.
+var CompactFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name: "list",
+            Type: cty.List(cty.String),
+        },
+    },
+    Type: function.StaticReturnType(cty.List(cty.String)),
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        listVal := args[0]
+        if !listVal.IsWhollyKnown() {
+            // If some of the element values aren't known yet then we
+            // can't yet return a compacted list
+            return cty.UnknownVal(retType), nil
+        }
+
+        var outputList []cty.Value
+
+        for it := listVal.ElementIterator(); it.Next(); {
+            _, v := it.Element()
+            if v.AsString() == "" {
+                continue
+            }
+            outputList = append(outputList, v)
+        }
+
+        if len(outputList) == 0 {
+            return cty.ListValEmpty(cty.String), nil
+        }
+
+        return cty.ListVal(outputList), nil
+    },
+})
+
+// ContainsFunc constructs a function that determines whether a given list contains
+// a given single value as one of its elements.
+var ContainsFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name: "list",
+            Type: cty.List(cty.DynamicPseudoType),
+        },
+        {
+            Name: "value",
+            Type: cty.DynamicPseudoType,
+        },
+    },
+    Type: function.StaticReturnType(cty.Bool),
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+
+        _, err = Index(args[0], args[1])
+        if err != nil {
+            return cty.False, nil
+        }
+
+        return cty.True, nil
+    },
+})
+
+// IndexFunc constructs a function that finds the element index for a given value in a list.
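+//
+// For example (illustrative only): index(["a", "b", "c"], "b") returns 1.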
+var IndexFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name: "list",
+            Type: cty.DynamicPseudoType,
+        },
+        {
+            Name: "value",
+            Type: cty.DynamicPseudoType,
+        },
+    },
+    Type: function.StaticReturnType(cty.Number),
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) {
+            return cty.NilVal, fmt.Errorf("argument must be a list or tuple")
+        }
+
+        if !args[0].IsKnown() {
+            return cty.UnknownVal(cty.Number), nil
+        }
+
+        if args[0].LengthInt() == 0 { // Easy path
+            return cty.NilVal, fmt.Errorf("cannot search an empty list")
+        }
+
+        for it := args[0].ElementIterator(); it.Next(); {
+            i, v := it.Element()
+            eq, err := stdlib.Equal(v, args[1])
+            if err != nil {
+                return cty.NilVal, err
+            }
+            if !eq.IsKnown() {
+                return cty.UnknownVal(cty.Number), nil
+            }
+            if eq.True() {
+                return i, nil
+            }
+        }
+        return cty.NilVal, fmt.Errorf("item not found")
+
+    },
+})
+
+// DistinctFunc constructs a function that takes a list and returns a new list
+// with any duplicate elements removed.
+var DistinctFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name: "list",
+            Type: cty.List(cty.DynamicPseudoType),
+        },
+    },
+    Type: func(args []cty.Value) (cty.Type, error) {
+        return args[0].Type(), nil
+    },
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        listVal := args[0]
+
+        if !listVal.IsWhollyKnown() {
+            return cty.UnknownVal(retType), nil
+        }
+        var list []cty.Value
+
+        for it := listVal.ElementIterator(); it.Next(); {
+            _, v := it.Element()
+            list, err = appendIfMissing(list, v)
+            if err != nil {
+                return cty.NilVal, err
+            }
+        }
+
+        return cty.ListVal(list), nil
+    },
+})
+
+// ChunklistFunc constructs a function that splits a single list into fixed-size chunks,
+// returning a list of lists.
+var ChunklistFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name: "list",
+            Type: cty.List(cty.DynamicPseudoType),
+        },
+        {
+            Name: "size",
+            Type: cty.Number,
+        },
+    },
+    Type: func(args []cty.Value) (cty.Type, error) {
+        return cty.List(args[0].Type()), nil
+    },
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        listVal := args[0]
+        if !listVal.IsWhollyKnown() {
+            return cty.UnknownVal(retType), nil
+        }
+
+        var size int
+        err = gocty.FromCtyValue(args[1], &size)
+        if err != nil {
+            return cty.NilVal, fmt.Errorf("invalid index: %s", err)
+        }
+
+        if size < 0 {
+            return cty.NilVal, fmt.Errorf("the size argument must be positive")
+        }
+
+        output := make([]cty.Value, 0)
+
+        // if size is 0, returns a list made of the initial list
+        if size == 0 {
+            output = append(output, listVal)
+            return cty.ListVal(output), nil
+        }
+
+        chunk := make([]cty.Value, 0)
+
+        l := args[0].LengthInt()
+        i := 0
+
+        for it := listVal.ElementIterator(); it.Next(); {
+            _, v := it.Element()
+            chunk = append(chunk, v)
+
+            // Chunk when the size boundary is reached, or at the end of the list
+            if (i+1)%size == 0 || (i+1) == l {
+                output = append(output, cty.ListVal(chunk))
+                chunk = make([]cty.Value, 0)
+            }
+            i++
+        }
+
+        return cty.ListVal(output), nil
+    },
+})
+
+// FlattenFunc constructs a function that takes a list and replaces any elements
+// that are lists with a flattened sequence of the list contents.
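+//
+// For example (illustrative only): flatten([["a", "b"], ["c"]]) returns
+// ["a", "b", "c"], descending into nested lists at any depth.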
+var FlattenFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name: "list",
+            Type: cty.List(cty.DynamicPseudoType),
+        },
+    },
+    Type: function.StaticReturnType(cty.List(cty.DynamicPseudoType)),
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        inputList := args[0]
+        if !inputList.IsWhollyKnown() {
+            return cty.UnknownVal(retType), nil
+        }
+
+        if inputList.LengthInt() == 0 {
+            return cty.ListValEmpty(retType.ElementType()), nil
+        }
+        outputList := make([]cty.Value, 0)
+
+        return cty.ListVal(flattener(outputList, inputList)), nil
+    },
+})
+
+// Flatten until it's not a cty.List
+func flattener(finalList []cty.Value, flattenList cty.Value) []cty.Value {
+
+    for it := flattenList.ElementIterator(); it.Next(); {
+        _, val := it.Element()
+
+        if val.Type().IsListType() {
+            finalList = flattener(finalList, val)
+        } else {
+            finalList = append(finalList, val)
+        }
+    }
+    return finalList
+}
+
+// KeysFunc constructs a function that takes a map and returns a sorted list of the map keys.
+var KeysFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name:         "inputMap",
+            Type:         cty.DynamicPseudoType,
+            AllowUnknown: true,
+        },
+    },
+    Type: func(args []cty.Value) (cty.Type, error) {
+        ty := args[0].Type()
+        switch {
+        case ty.IsMapType():
+            return cty.List(cty.String), nil
+        case ty.IsObjectType():
+            atys := ty.AttributeTypes()
+            if len(atys) == 0 {
+                return cty.EmptyTuple, nil
+            }
+            // All of our result elements will be strings, and atys just
+            // decides how many there are.
+            etys := make([]cty.Type, len(atys))
+            for i := range etys {
+                etys[i] = cty.String
+            }
+            return cty.Tuple(etys), nil
+        default:
+            return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type")
+        }
+    },
+    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+        m := args[0]
+        var keys []cty.Value
+
+        switch {
+        case m.Type().IsObjectType():
+            // In this case we allow unknown values so we must work only with
+            // the attribute _types_, not with the value itself.
+            var names []string
+            for name := range m.Type().AttributeTypes() {
+                names = append(names, name)
+            }
+            sort.Strings(names) // same ordering guaranteed by cty's ElementIterator
+            if len(names) == 0 {
+                return cty.EmptyTupleVal, nil
+            }
+            keys = make([]cty.Value, len(names))
+            for i, name := range names {
+                keys[i] = cty.StringVal(name)
+            }
+            return cty.TupleVal(keys), nil
+        default:
+            if !m.IsKnown() {
+                return cty.UnknownVal(retType), nil
+            }
+
+            // cty guarantees that ElementIterator will iterate in lexicographical
+            // order by key.
+            for it := args[0].ElementIterator(); it.Next(); {
+                k, _ := it.Element()
+                keys = append(keys, k)
+            }
+            if len(keys) == 0 {
+                return cty.ListValEmpty(cty.String), nil
+            }
+            return cty.ListVal(keys), nil
+        }
+    },
+})
+
+// ListFunc constructs a function that takes an arbitrary number of arguments
+// and returns a list containing those values in the same order.
+//
+// This function is deprecated in Terraform v0.12
+var ListFunc = function.New(&function.Spec{
+    Params: []function.Parameter{},
+    VarParam: &function.Parameter{
+        Name:             "vals",
+        Type:             cty.DynamicPseudoType,
+        AllowUnknown:     true,
+        AllowDynamicType: true,
+        AllowNull:        true,
+    },
+    Type: func(args []cty.Value) (ret cty.Type, err error) {
+        if len(args) == 0 {
+            return cty.NilType, fmt.Errorf("at least one argument is required")
+        }
+
+        argTypes := make([]cty.Type, len(args))
+
+        for i, arg := range args {
+            argTypes[i] = arg.Type()
+        }
+
+        retType, _ := convert.UnifyUnsafe(argTypes)
+        if retType == cty.NilType {
+            return cty.NilType, fmt.Errorf("all arguments must have the same type")
+        }
+
+        return cty.List(retType), nil
+    },
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        newList := make([]cty.Value, 0, len(args))
+
+        for _, arg := range args {
+            // We already know this will succeed because of the checks in our Type func above
+            arg, _ = convert.Convert(arg, retType.ElementType())
+            newList = append(newList, arg)
+        }
+
+        return cty.ListVal(newList), nil
+    },
+})
+
+// LookupFunc constructs a function that performs dynamic lookups of map types.
+var LookupFunc = function.New(&function.Spec{
+    Params: []function.Parameter{
+        {
+            Name: "inputMap",
+            Type: cty.DynamicPseudoType,
+        },
+        {
+            Name: "key",
+            Type: cty.String,
+        },
+    },
+    VarParam: &function.Parameter{
+        Name:             "default",
+        Type:             cty.DynamicPseudoType,
+        AllowUnknown:     true,
+        AllowDynamicType: true,
+        AllowNull:        true,
+    },
+    Type: func(args []cty.Value) (ret cty.Type, err error) {
+        if len(args) < 1 || len(args) > 3 {
+            return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args))
+        }
+
+        ty := args[0].Type()
+
+        switch {
+        case ty.IsObjectType():
+            if !args[1].IsKnown() {
+                return cty.DynamicPseudoType, nil
+            }
+
+            key := args[1].AsString()
+            if ty.HasAttribute(key) {
+                return args[0].GetAttr(key).Type(), nil
+            } else if len(args) == 3 {
+                // if the key isn't found but a default is provided,
+                // return the default type
+                return args[2].Type(), nil
+            }
+            return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key)
+        case ty.IsMapType():
+            return ty.ElementType(), nil
+        default:
+            return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument")
+        }
+    },
+    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+        var defaultVal cty.Value
+        defaultValueSet := false
+
+        if len(args) == 3 {
+            defaultVal = args[2]
+            defaultValueSet = true
+        }
+
+        mapVar := args[0]
+        lookupKey := args[1].AsString()
+
+        if !mapVar.IsWhollyKnown() {
+            return cty.UnknownVal(retType), nil
+        }
+
+        if mapVar.Type().IsObjectType() {
+            if mapVar.Type().HasAttribute(lookupKey) {
+                return mapVar.GetAttr(lookupKey), nil
+            }
+        } else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True {
+            v := mapVar.Index(cty.StringVal(lookupKey))
+            if ty := v.Type(); !ty.Equals(cty.NilType) {
+                switch {
+                case ty.Equals(cty.String):
+                    return cty.StringVal(v.AsString()), nil
+                case ty.Equals(cty.Number):
+                    return cty.NumberVal(v.AsBigFloat()), nil
+                default:
+                    return cty.NilVal, fmt.Errorf("lookup() can only be used with flat lists")
+                }
+            }
+        }
+
+        if defaultValueSet {
+            defaultVal, err = convert.Convert(defaultVal, retType)
+            if err != nil {
+                return cty.NilVal, err
+            }
+            return defaultVal, nil
+        }
+
+        return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf(
+            "lookup failed to find '%s'", lookupKey)
+    },
+})
+
+// MapFunc constructs a function that takes an even number of arguments and
+// MapFunc constructs a function that takes an even number of arguments and
+// returns a map whose elements are constructed from consecutive pairs of arguments.
+//
+// This function is deprecated in Terraform v0.12
+var MapFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	VarParam: &function.Parameter{
+		Name:             "vals",
+		Type:             cty.DynamicPseudoType,
+		AllowUnknown:     true,
+		AllowDynamicType: true,
+		AllowNull:        true,
+	},
+	Type: func(args []cty.Value) (ret cty.Type, err error) {
+		if len(args) < 2 || len(args)%2 != 0 {
+			return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args))
+		}
+
+		argTypes := make([]cty.Type, len(args)/2)
+		index := 0
+
+		for i := 0; i < len(args); i += 2 {
+			argTypes[index] = args[i+1].Type()
+			index++
+		}
+
+		valType, _ := convert.UnifyUnsafe(argTypes)
+		if valType == cty.NilType {
+			return cty.NilType, fmt.Errorf("all arguments must have the same type")
+		}
+
+		return cty.Map(valType), nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		for _, arg := range args {
+			if !arg.IsWhollyKnown() {
+				return cty.UnknownVal(retType), nil
+			}
+		}
+
+		outputMap := make(map[string]cty.Value)
+
+		for i := 0; i < len(args); i += 2 {
+			key := args[i].AsString()
+
+			err := gocty.FromCtyValue(args[i], &key)
+			if err != nil {
+				return cty.NilVal, err
+			}
+
+			val := args[i+1]
+
+			var variable cty.Value
+			err = gocty.FromCtyValue(val, &variable)
+			if err != nil {
+				return cty.NilVal, err
+			}
+
+			// We already know this will succeed because of the checks in our Type func above
+			variable, _ = convert.Convert(variable, retType.ElementType())
+
+			// Check for duplicate keys
+			if _, ok := outputMap[key]; ok {
+				return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key)
+			}
+			outputMap[key] = variable
+		}
+
+		return cty.MapVal(outputMap), nil
+	},
+})
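+
+// Illustrative sketch (not part of the upstream source): MapFunc pairs up
+// its arguments as key/value entries, so this hypothetical call produces
+// a map equivalent to {"a" = 1, "b" = 2}.
+func exampleMap() (cty.Value, error) {
+	return Map(
+		cty.StringVal("a"), cty.NumberIntVal(1),
+		cty.StringVal("b"), cty.NumberIntVal(2),
+	)
+}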
+// MatchkeysFunc constructs a function that builds a new list by taking a
+// subset of elements from one list whose indexes match the corresponding
+// indexes of values in another list.
+var MatchkeysFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "values",
+			Type: cty.List(cty.DynamicPseudoType),
+		},
+		{
+			Name: "keys",
+			Type: cty.List(cty.DynamicPseudoType),
+		},
+		{
+			Name: "searchset",
+			Type: cty.List(cty.DynamicPseudoType),
+		},
+	},
+	Type: func(args []cty.Value) (cty.Type, error) {
+		if !args[1].Type().Equals(args[2].Type()) {
+			return cty.NilType, fmt.Errorf("lists must be of the same type")
+		}
+
+		return args[0].Type(), nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		if !args[0].IsKnown() {
+			return cty.UnknownVal(cty.List(retType.ElementType())), nil
+		}
+
+		if args[0].LengthInt() != args[1].LengthInt() {
+			return cty.ListValEmpty(retType.ElementType()), fmt.Errorf("length of keys and values should be equal")
+		}
+
+		output := make([]cty.Value, 0)
+
+		values := args[0]
+		keys := args[1]
+		searchset := args[2]
+
+		// if searchset is empty, return an empty list.
+		if searchset.LengthInt() == 0 {
+			return cty.ListValEmpty(retType.ElementType()), nil
+		}
+
+		if !values.IsWhollyKnown() || !keys.IsWhollyKnown() {
+			return cty.UnknownVal(retType), nil
+		}
+
+		i := 0
+		for it := keys.ElementIterator(); it.Next(); {
+			_, key := it.Element()
+			for iter := searchset.ElementIterator(); iter.Next(); {
+				_, search := iter.Element()
+				eq, err := stdlib.Equal(key, search)
+				if err != nil {
+					return cty.NilVal, err
+				}
+				if !eq.IsKnown() {
+					return cty.ListValEmpty(retType.ElementType()), nil
+				}
+				if eq.True() {
+					v := values.Index(cty.NumberIntVal(int64(i)))
+					output = append(output, v)
+					break
+				}
+			}
+			i++
+		}
+
+		// if we haven't matched any key, then output is an empty list.
+		if len(output) == 0 {
+			return cty.ListValEmpty(retType.ElementType()), nil
+		}
+		return cty.ListVal(output), nil
+	},
+})
+
+// MergeFunc constructs a function that takes an arbitrary number of maps and
+// returns a single map that contains a merged set of elements from all of the maps.
+//
+// If more than one given map defines the same key then the one that is later in
+// the argument sequence takes precedence.
+var MergeFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	VarParam: &function.Parameter{
+		Name:             "maps",
+		Type:             cty.DynamicPseudoType,
+		AllowDynamicType: true,
+		AllowNull:        true,
+	},
+	Type: function.StaticReturnType(cty.DynamicPseudoType),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		outputMap := make(map[string]cty.Value)
+
+		for _, arg := range args {
+			if !arg.IsWhollyKnown() {
+				return cty.UnknownVal(retType), nil
+			}
+			if !arg.Type().IsObjectType() && !arg.Type().IsMapType() {
+				return cty.NilVal, fmt.Errorf("arguments must be maps or objects, got %#v", arg.Type().FriendlyName())
+			}
+			for it := arg.ElementIterator(); it.Next(); {
+				k, v := it.Element()
+				outputMap[k.AsString()] = v
+			}
+		}
+		return cty.ObjectVal(outputMap), nil
+	},
+})
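+
+// Illustrative sketch (not part of the upstream source): merging two
+// hypothetical maps with MergeFunc; the later argument wins for the
+// duplicated key "b", and the result is an object value {a = "1", b = "3"}.
+func exampleMerge() (cty.Value, error) {
+	a := cty.MapVal(map[string]cty.Value{"a": cty.StringVal("1"), "b": cty.StringVal("2")})
+	b := cty.MapVal(map[string]cty.Value{"b": cty.StringVal("3")})
+	return Merge(a, b)
+}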
+// ReverseFunc takes a sequence and produces a new sequence of the same length
+// with all of the same elements as the given sequence but in reverse order.
+var ReverseFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "list",
+			Type: cty.DynamicPseudoType,
+		},
+	},
+	Type: func(args []cty.Value) (cty.Type, error) {
+		argTy := args[0].Type()
+		switch {
+		case argTy.IsTupleType():
+			argTys := argTy.TupleElementTypes()
+			retTys := make([]cty.Type, len(argTys))
+			for i, ty := range argTys {
+				retTys[len(retTys)-i-1] = ty
+			}
+			return cty.Tuple(retTys), nil
+		case argTy.IsListType(), argTy.IsSetType(): // We accept sets here to mimic the usual behavior of auto-converting to list
+			return cty.List(argTy.ElementType()), nil
+		default:
+			return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName())
+		}
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		in := args[0].AsValueSlice()
+		outVals := make([]cty.Value, len(in))
+		for i, v := range in {
+			outVals[len(outVals)-i-1] = v
+		}
+		switch {
+		case retType.IsTupleType():
+			return cty.TupleVal(outVals), nil
+		default:
+			if len(outVals) == 0 {
+				return cty.ListValEmpty(retType.ElementType()), nil
+			}
+			return cty.ListVal(outVals), nil
+		}
+	},
+})
+
+// SetProductFunc calculates the cartesian product of two or more sets or
+// sequences. If the arguments are all lists then the result is a list of tuples,
+// preserving the ordering of all of the input lists. Otherwise the result is a
+// set of tuples.
+var SetProductFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	VarParam: &function.Parameter{
+		Name: "sets",
+		Type: cty.DynamicPseudoType,
+	},
+	Type: func(args []cty.Value) (retType cty.Type, err error) {
+		if len(args) < 2 {
+			return cty.NilType, fmt.Errorf("at least two arguments are required")
+		}
+
+		listCount := 0
+		elemTys := make([]cty.Type, len(args))
+		for i, arg := range args {
+			aty := arg.Type()
+			switch {
+			case aty.IsSetType():
+				elemTys[i] = aty.ElementType()
+			case aty.IsListType():
+				elemTys[i] = aty.ElementType()
+				listCount++
+			case aty.IsTupleType():
+				// We can accept a tuple type only if there's some common type
+				// that all of its elements can be converted to.
+				allEtys := aty.TupleElementTypes()
+				if len(allEtys) == 0 {
+					elemTys[i] = cty.DynamicPseudoType
+					listCount++
+					break
+				}
+				ety, _ := convert.UnifyUnsafe(allEtys)
+				if ety == cty.NilType {
+					return cty.NilType, function.NewArgErrorf(i, "all elements must be of the same type")
+				}
+				elemTys[i] = ety
+				listCount++
+			default:
+				return cty.NilType, function.NewArgErrorf(i, "a set or a list is required")
+			}
+		}
+
+		if listCount == len(args) {
+			return cty.List(cty.Tuple(elemTys)), nil
+		}
+		return cty.Set(cty.Tuple(elemTys)), nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		ety := retType.ElementType()
+
+		total := 1
+		for _, arg := range args {
+			// Because of our type checking function, we are guaranteed that
+			// all of the arguments are known, non-null values of types that
+			// support LengthInt.
+			total *= arg.LengthInt()
+		}
+
+		if total == 0 {
+			// If any of the arguments was an empty collection then our result
+			// is also an empty collection, which we'll short-circuit here.
+			if retType.IsListType() {
+				return cty.ListValEmpty(ety), nil
+			}
+			return cty.SetValEmpty(ety), nil
+		}
+
+		subEtys := ety.TupleElementTypes()
+		product := make([][]cty.Value, total)
+
+		b := make([]cty.Value, total*len(args))
+		n := make([]int, len(args))
+		s := 0
+		argVals := make([][]cty.Value, len(args))
+		for i, arg := range args {
+			argVals[i] = arg.AsValueSlice()
+		}
+
+		for i := range product {
+			e := s + len(args)
+			pi := b[s:e]
+			product[i] = pi
+			s = e
+
+			for j, n := range n {
+				val := argVals[j][n]
+				ty := subEtys[j]
+				if !val.Type().Equals(ty) {
+					var err error
+					val, err = convert.Convert(val, ty)
+					if err != nil {
+						// Should never happen since we checked this in our
+						// type-checking function.
+						return cty.NilVal, fmt.Errorf("failed to convert argVals[%d][%d] to %s; this is a bug in Terraform", j, n, ty.FriendlyName())
+					}
+				}
+				pi[j] = val
+			}
+
+			for j := len(n) - 1; j >= 0; j-- {
+				n[j]++
+				if n[j] < len(argVals[j]) {
+					break
+				}
+				n[j] = 0
+			}
+		}
+
+		productVals := make([]cty.Value, total)
+		for i, vals := range product {
+			productVals[i] = cty.TupleVal(vals)
+		}
+
+		if retType.IsListType() {
+			return cty.ListVal(productVals), nil
+		}
+		return cty.SetVal(productVals), nil
+	},
+})
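+
+// Illustrative sketch (not part of the upstream source): the cartesian
+// product of two hypothetical lists yields a list of tuples, preserving
+// input order: [["a",1], ["a",2], ["b",1], ["b",2]].
+func exampleSetProduct() (cty.Value, error) {
+	letters := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
+	numbers := cty.ListVal([]cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)})
+	return SetProduct(letters, numbers)
+}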
+// SliceFunc constructs a function that extracts some consecutive elements
+// from within a list.
+var SliceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "list",
+			Type: cty.List(cty.DynamicPseudoType),
+		},
+		{
+			Name: "startIndex",
+			Type: cty.Number,
+		},
+		{
+			Name: "endIndex",
+			Type: cty.Number,
+		},
+	},
+	Type: func(args []cty.Value) (cty.Type, error) {
+		return args[0].Type(), nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		inputList := args[0]
+		if !inputList.IsWhollyKnown() {
+			return cty.UnknownVal(retType), nil
+		}
+		var startIndex, endIndex int
+
+		if err = gocty.FromCtyValue(args[1], &startIndex); err != nil {
+			return cty.NilVal, fmt.Errorf("invalid start index: %s", err)
+		}
+		if err = gocty.FromCtyValue(args[2], &endIndex); err != nil {
+			return cty.NilVal, fmt.Errorf("invalid end index: %s", err)
+		}
+
+		if startIndex < 0 {
+			return cty.NilVal, fmt.Errorf("from index must be >= 0")
+		}
+		if endIndex > inputList.LengthInt() {
+			return cty.NilVal, fmt.Errorf("to index must be <= length of the input list")
+		}
+		if startIndex > endIndex {
+			return cty.NilVal, fmt.Errorf("from index must be <= to index")
+		}
+
+		var outputList []cty.Value
+
+		i := 0
+		for it := inputList.ElementIterator(); it.Next(); {
+			_, v := it.Element()
+			if i >= startIndex && i < endIndex {
+				outputList = append(outputList, v)
+			}
+			i++
+		}
+
+		if len(outputList) == 0 {
+			return cty.ListValEmpty(retType.ElementType()), nil
+		}
+		return cty.ListVal(outputList), nil
+	},
+})
+
+// TransposeFunc constructs a function that takes a map of lists of strings and
+// swaps the keys and values to produce a new map of lists of strings.
+var TransposeFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "values",
+			Type: cty.Map(cty.List(cty.String)),
+		},
+	},
+	Type: function.StaticReturnType(cty.Map(cty.List(cty.String))),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		inputMap := args[0]
+		if !inputMap.IsWhollyKnown() {
+			return cty.UnknownVal(retType), nil
+		}
+
+		outputMap := make(map[string]cty.Value)
+		tmpMap := make(map[string][]string)
+
+		for it := inputMap.ElementIterator(); it.Next(); {
+			inKey, inVal := it.Element()
+			for iter := inVal.ElementIterator(); iter.Next(); {
+				_, val := iter.Element()
+				if !val.Type().Equals(cty.String) {
+					return cty.MapValEmpty(cty.List(cty.String)), fmt.Errorf("input must be a map of lists of strings")
+				}
+
+				outKey := val.AsString()
+				if _, ok := tmpMap[outKey]; !ok {
+					tmpMap[outKey] = make([]string, 0)
+				}
+				outVal := tmpMap[outKey]
+				outVal = append(outVal, inKey.AsString())
+				sort.Strings(outVal)
+				tmpMap[outKey] = outVal
+			}
+		}
+
+		for outKey, outVal := range tmpMap {
+			values := make([]cty.Value, 0)
+			for _, v := range outVal {
+				values = append(values, cty.StringVal(v))
+			}
+			outputMap[outKey] = cty.ListVal(values)
+		}
+
+		return cty.MapVal(outputMap), nil
+	},
+})
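+
+// Illustrative sketch (not part of the upstream source): transposing a
+// hypothetical map of lists of strings swaps keys and values, so
+// {"a" = ["x", "y"]} becomes {"x" = ["a"], "y" = ["a"]}.
+func exampleTranspose() (cty.Value, error) {
+	in := cty.MapVal(map[string]cty.Value{
+		"a": cty.ListVal([]cty.Value{cty.StringVal("x"), cty.StringVal("y")}),
+	})
+	return Transpose(in)
+}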
+// ValuesFunc constructs a function that returns a list of the map values,
+// in the order of the sorted keys.
+var ValuesFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "values",
+			Type: cty.DynamicPseudoType,
+		},
+	},
+	Type: func(args []cty.Value) (ret cty.Type, err error) {
+		ty := args[0].Type()
+		if ty.IsMapType() {
+			return cty.List(ty.ElementType()), nil
+		} else if ty.IsObjectType() {
+			// The result is a tuple type with all of the same types as our
+			// object type's attributes, sorted in lexicographical order by the
+			// keys. (This matches the sort order guaranteed by ElementIterator
+			// on a cty object value.)
+			atys := ty.AttributeTypes()
+			if len(atys) == 0 {
+				return cty.EmptyTuple, nil
+			}
+			attrNames := make([]string, 0, len(atys))
+			for name := range atys {
+				attrNames = append(attrNames, name)
+			}
+			sort.Strings(attrNames)
+
+			tys := make([]cty.Type, len(attrNames))
+			for i, name := range attrNames {
+				tys[i] = atys[name]
+			}
+			return cty.Tuple(tys), nil
+		}
+		return cty.NilType, fmt.Errorf("values() requires a map as the first argument")
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		mapVar := args[0]
+
+		// We can just iterate the map/object value here because cty guarantees
+		// that these types always iterate in key lexicographical order.
+		var values []cty.Value
+		for it := mapVar.ElementIterator(); it.Next(); {
+			_, val := it.Element()
+			values = append(values, val)
+		}
+
+		if retType.IsTupleType() {
+			return cty.TupleVal(values), nil
+		}
+		if len(values) == 0 {
+			return cty.ListValEmpty(retType.ElementType()), nil
+		}
+		return cty.ListVal(values), nil
+	},
+})
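+
+// Illustrative sketch (not part of the upstream source): Values returns the
+// map's values ordered by the lexicographically sorted keys, so this
+// hypothetical call yields ["1", "2"].
+func exampleValues() (cty.Value, error) {
+	return Values(cty.MapVal(map[string]cty.Value{
+		"b": cty.StringVal("2"),
+		"a": cty.StringVal("1"),
+	}))
+}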
+// ZipmapFunc constructs a function that builds a map from a list of keys
+// and a corresponding list of values.
+var ZipmapFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "keys",
+			Type: cty.List(cty.String),
+		},
+		{
+			Name: "values",
+			Type: cty.DynamicPseudoType,
+		},
+	},
+	Type: func(args []cty.Value) (ret cty.Type, err error) {
+		keys := args[0]
+		values := args[1]
+		valuesTy := values.Type()
+
+		switch {
+		case valuesTy.IsListType():
+			return cty.Map(values.Type().ElementType()), nil
+		case valuesTy.IsTupleType():
+			if !keys.IsWhollyKnown() {
+				// Since zipmap with a tuple produces an object, we need to know
+				// all of the key names before we can predict our result type.
+				return cty.DynamicPseudoType, nil
+			}
+
+			keysRaw := keys.AsValueSlice()
+			valueTypesRaw := valuesTy.TupleElementTypes()
+			if len(keysRaw) != len(valueTypesRaw) {
+				return cty.NilType, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keysRaw), len(valueTypesRaw))
+			}
+			atys := make(map[string]cty.Type, len(valueTypesRaw))
+			for i, keyVal := range keysRaw {
+				if keyVal.IsNull() {
+					return cty.NilType, fmt.Errorf("keys list has null value at index %d", i)
+				}
+				key := keyVal.AsString()
+				atys[key] = valueTypesRaw[i]
+			}
+			return cty.Object(atys), nil
+
+		default:
+			return cty.NilType, fmt.Errorf("values argument must be a list or tuple value")
+		}
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		keys := args[0]
+		values := args[1]
+
+		if !keys.IsWhollyKnown() {
+			// Unknown map keys and object attributes are not supported, so
+			// our entire result must be unknown in this case.
+			return cty.UnknownVal(retType), nil
+		}
+
+		// both keys and values are guaranteed to be shallowly-known here,
+		// because our declared params above don't allow unknown or null values.
+		if keys.LengthInt() != values.LengthInt() {
+			return cty.NilVal, fmt.Errorf("number of keys (%d) does not match number of values (%d)", keys.LengthInt(), values.LengthInt())
+		}
+
+		output := make(map[string]cty.Value)
+
+		i := 0
+		for it := keys.ElementIterator(); it.Next(); {
+			_, v := it.Element()
+			val := values.Index(cty.NumberIntVal(int64(i)))
+			output[v.AsString()] = val
+			i++
+		}
+
+		switch {
+		case retType.IsMapType():
+			if len(output) == 0 {
+				return cty.MapValEmpty(retType.ElementType()), nil
+			}
+			return cty.MapVal(output), nil
+		case retType.IsObjectType():
+			return cty.ObjectVal(output), nil
+		default:
+			// Should never happen because the type-check function should've
+			// caught any other case.
+			return cty.NilVal, fmt.Errorf("internally selected incorrect result type %s (this is a bug)", retType.FriendlyName())
+		}
+	},
+})
+
+// helper function to add an element to a list, if it does not already exist
+func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) {
+	for _, ele := range slice {
+		eq, err := stdlib.Equal(ele, element)
+		if err != nil {
+			return slice, err
+		}
+		if eq.True() {
+			return slice, nil
+		}
+	}
+	return append(slice, element), nil
+}
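+
+// Illustrative sketch (not part of the upstream source): zipping a
+// hypothetical key list with a same-length value list produces
+// {"a" = "x", "b" = "y"}.
+func exampleZipmap() (cty.Value, error) {
+	keys := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
+	vals := cty.ListVal([]cty.Value{cty.StringVal("x"), cty.StringVal("y")})
+	return Zipmap(keys, vals)
+}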
+// Element returns a single element from a given list at the given index. If
+// index is greater than the length of the list then it is wrapped modulo
+// the list length.
+func Element(list, index cty.Value) (cty.Value, error) {
+	return ElementFunc.Call([]cty.Value{list, index})
+}
+
+// Length returns the number of elements in the given collection or number of
+// Unicode characters in the given string.
+func Length(collection cty.Value) (cty.Value, error) {
+	return LengthFunc.Call([]cty.Value{collection})
+}
+
+// Coalesce takes any number of arguments and returns the first one that isn't empty.
+func Coalesce(args ...cty.Value) (cty.Value, error) {
+	return CoalesceFunc.Call(args)
+}
+
+// CoalesceList takes any number of list arguments and returns the first one that isn't empty.
+func CoalesceList(args ...cty.Value) (cty.Value, error) {
+	return CoalesceListFunc.Call(args)
+}
+
+// Compact takes a list of strings and returns a new list
+// with any empty string elements removed.
+func Compact(list cty.Value) (cty.Value, error) {
+	return CompactFunc.Call([]cty.Value{list})
+}
+
+// Contains determines whether a given list contains a given single value
+// as one of its elements.
+func Contains(list, value cty.Value) (cty.Value, error) {
+	return ContainsFunc.Call([]cty.Value{list, value})
+}
+
+// Index finds the element index for a given value in a list.
+func Index(list, value cty.Value) (cty.Value, error) {
+	return IndexFunc.Call([]cty.Value{list, value})
+}
+
+// Distinct takes a list and returns a new list with any duplicate elements removed.
+func Distinct(list cty.Value) (cty.Value, error) {
+	return DistinctFunc.Call([]cty.Value{list})
+}
+
+// Chunklist splits a single list into fixed-size chunks, returning a list of lists.
+func Chunklist(list, size cty.Value) (cty.Value, error) {
+	return ChunklistFunc.Call([]cty.Value{list, size})
+}
+
+// Flatten takes a list and replaces any elements that are lists with a flattened
+// sequence of the list contents.
+func Flatten(list cty.Value) (cty.Value, error) {
+	return FlattenFunc.Call([]cty.Value{list})
+}
+
+// Keys takes a map and returns a sorted list of the map keys.
+func Keys(inputMap cty.Value) (cty.Value, error) {
+	return KeysFunc.Call([]cty.Value{inputMap})
+}
+
+// List takes an arbitrary number of arguments and returns a list containing those
+// values in the same order.
+func List(args ...cty.Value) (cty.Value, error) {
+	return ListFunc.Call(args)
+}
+
+// Lookup performs a dynamic lookup into a map.
+// There are two required arguments, map and key, plus an optional default,
+// which is a value to return if no key is found in map.
+func Lookup(args ...cty.Value) (cty.Value, error) {
+	return LookupFunc.Call(args)
+}
+
+// Map takes an even number of arguments and returns a map whose elements are constructed
+// from consecutive pairs of arguments.
+func Map(args ...cty.Value) (cty.Value, error) {
+	return MapFunc.Call(args)
+}
+
+// Matchkeys constructs a new list by taking a subset of elements from one list
+// whose indexes match the corresponding indexes of values in another list.
+func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) {
+	return MatchkeysFunc.Call([]cty.Value{values, keys, searchset})
+}
+
+// Merge takes an arbitrary number of maps and returns a single map that contains
+// a merged set of elements from all of the maps.
+//
+// If more than one given map defines the same key then the one that is later in
+// the argument sequence takes precedence.
+func Merge(maps ...cty.Value) (cty.Value, error) {
+	return MergeFunc.Call(maps)
+}
+
+// Reverse takes a sequence and produces a new sequence of the same length
+// with all of the same elements as the given sequence but in reverse order.
+func Reverse(list cty.Value) (cty.Value, error) {
+	return ReverseFunc.Call([]cty.Value{list})
+}
+
+// SetProduct computes the cartesian product of sets or sequences.
+func SetProduct(sets ...cty.Value) (cty.Value, error) {
+	return SetProductFunc.Call(sets)
+}
+
+// Slice extracts some consecutive elements from within a list.
+func Slice(list, start, end cty.Value) (cty.Value, error) {
+	return SliceFunc.Call([]cty.Value{list, start, end})
+}
+
+// Transpose takes a map of lists of strings and swaps the keys and values to
+// produce a new map of lists of strings.
+func Transpose(values cty.Value) (cty.Value, error) {
+	return TransposeFunc.Call([]cty.Value{values})
+}
+
+// Values returns a list of the map values, in the order of the sorted keys.
+// This function only works on flat maps.
+func Values(values cty.Value) (cty.Value, error) {
+	return ValuesFunc.Call([]cty.Value{values})
+}
+
+// Zipmap constructs a map from a list of keys and a corresponding list of values.
+func Zipmap(keys, values cty.Value) (cty.Value, error) {
+	return ZipmapFunc.Call([]cty.Value{keys, values})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go b/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go
new file mode 100644
index 0000000000..83f8597972
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go
@@ -0,0 +1,87 @@
+package funcs
+
+import (
+	"strconv"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// MakeToFunc constructs a "to..." function, like "tostring", which converts
+// its argument to a specific type or type kind.
+//
+// The given type wantTy can be any type constraint that cty's "convert" package
+// would accept.
+// In particular, this means that you can pass
+// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which
+// will then cause cty to attempt to unify all of the element types when given
+// a tuple.
+func MakeToFunc(wantTy cty.Type) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "v",
+				// We use DynamicPseudoType rather than wantTy here so that
+				// all values will pass through the function API verbatim and
+				// we can handle the conversion logic within the Type and
+				// Impl functions. This allows us to customize the error
+				// messages to be more appropriate for an explicit type
+				// conversion, whereas the cty function system produces
+				// messages aimed at _implicit_ type conversions.
+				Type:      cty.DynamicPseudoType,
+				AllowNull: true,
+			},
+		},
+		Type: func(args []cty.Value) (cty.Type, error) {
+			gotTy := args[0].Type()
+			if gotTy.Equals(wantTy) {
+				return wantTy, nil
+			}
+			conv := convert.GetConversionUnsafe(args[0].Type(), wantTy)
+			if conv == nil {
+				// We'll use some specialized errors for some trickier cases,
+				// but most we can handle in a simple way.
+				switch {
+				case gotTy.IsTupleType() && wantTy.IsTupleType():
+					return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy))
+				case gotTy.IsObjectType() && wantTy.IsObjectType():
+					return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy))
+				default:
+					return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint())
+				}
+			}
+			// If a conversion is available then everything is fine.
+			return wantTy, nil
+		},
+		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+			// We didn't set "AllowUnknown" on our argument, so it is guaranteed
+			// to be known here but may still be null.
+			ret, err := convert.Convert(args[0], retType)
+			if err != nil {
+				// Because we used GetConversionUnsafe above, conversion can
+				// still potentially fail in here. For example, if the user
+				// asks to convert the string "a" to bool then we'll
+				// optimistically permit it during type checking but fail here
+				// once we note that the value isn't either "true" or "false".
+				gotTy := args[0].Type()
+				switch {
+				case gotTy == cty.String && wantTy == cty.Bool:
+					what := "string"
+					if !args[0].IsNull() {
+						what = strconv.Quote(args[0].AsString())
+					}
+					return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what)
+				case gotTy == cty.String && wantTy == cty.Number:
+					what := "string"
+					if !args[0].IsNull() {
+						what = strconv.Quote(args[0].AsString())
+					}
+					return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what)
+				default:
+					return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint())
+				}
+			}
+			return ret, nil
+		},
+	})
+}
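+
+// Illustrative sketch (not part of the upstream source): a "tonumber"-style
+// function built with MakeToFunc converts a compatible string; an
+// incompatible one fails with the specialized error above.
+func exampleToNumber() (cty.Value, error) {
+	toNumber := MakeToFunc(cty.Number)
+	// Succeeds: returns a cty.Number equal to cty.NumberIntVal(5).
+	return toNumber.Call([]cty.Value{cty.StringVal("5")})
+}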
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
new file mode 100644
index 0000000000..5cb4bc5c14
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
@@ -0,0 +1,285 @@
+package funcs
+
+import (
+	"crypto/md5"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/sha256"
+	"crypto/sha512"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/pem"
+	"fmt"
+	"hash"
+
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+	"golang.org/x/crypto/bcrypt"
+)
+
+var UUIDFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	Type:   function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		result, err := uuid.GenerateUUID()
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		return cty.StringVal(result), nil
+	},
+})
+
+// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string
+// and encodes it with Base64.
+var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString)
+
+// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileBase64Sha256Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString)
+}
+
+// Base64Sha512Func constructs a function that computes the SHA512 hash of a given string
+// and encodes it with Base64.
+var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString)
+
+// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileBase64Sha512Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString)
+}
+
+// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher.
+var BcryptFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name: "cost",
+		Type: cty.Number,
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		defaultCost := 10
+
+		if len(args) > 1 {
+			var val int
+			if err := gocty.FromCtyValue(args[1], &val); err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+			defaultCost = val
+		}
+
+		if len(args) > 2 {
+			return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments")
+		}
+
+		input := args[0].AsString()
+		out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("error occurred generating password: %s", err.Error())
+		}
+
+		return cty.StringVal(string(out)), nil
+	},
+})
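+
+// Illustrative sketch (not part of the upstream source): hashing a
+// hypothetical password with an explicit cost of 12 instead of the
+// default cost of 10, via the Bcrypt wrapper defined later in this file.
+func exampleBcrypt() (cty.Value, error) {
+	return Bcrypt(cty.StringVal("hunter2"), cty.NumberIntVal(12))
+}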
+// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits.
+var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString)
+
+// MakeFileMd5Func constructs a function that is like Md5Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileMd5Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString)
+}
+
+// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext.
+var RsaDecryptFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "ciphertext",
+			Type: cty.String,
+		},
+		{
+			Name: "privatekey",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		s := args[0].AsString()
+		key := args[1].AsString()
+
+		b, err := base64.StdEncoding.DecodeString(s)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode input %q: cipher text must be base64-encoded", s)
+		}
+
+		block, _ := pem.Decode([]byte(key))
+		if block == nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to parse key: no key found")
+		}
+		if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
+			return cty.UnknownVal(cty.String), fmt.Errorf(
+				"failed to parse key: password protected keys are not supported. Please decrypt the key prior to use",
+			)
+		}
+
+		x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		out, err := rsa.DecryptPKCS1v15(nil, x509Key, b)
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.StringVal(string(out)), nil
+	},
+})
+
+// Sha1Func constructs a function that computes the SHA1 hash of a given string
+// and encodes it with hexadecimal digits.
+var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString)
+
+// MakeFileSha1Func constructs a function that is like Sha1Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileSha1Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString)
+}
+
+// Sha256Func constructs a function that computes the SHA256 hash of a given string
+// and encodes it with hexadecimal digits.
+var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString)
+
+// MakeFileSha256Func constructs a function that is like Sha256Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileSha256Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString)
+}
+
+// Sha512Func constructs a function that computes the SHA512 hash of a given string
+// and encodes it with hexadecimal digits.
+var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString)
+
+// MakeFileSha512Func constructs a function that is like Sha512Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileSha512Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString)
+}
+
+func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "str",
+				Type: cty.String,
+			},
+		},
+		Type: function.StaticReturnType(cty.String),
+		Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+			s := args[0].AsString()
+			h := hf()
+			h.Write([]byte(s))
+			rv := enc(h.Sum(nil))
+			return cty.StringVal(rv), nil
+		},
+	})
+}
+
+func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "path",
+				Type: cty.String,
+			},
+		},
+		Type: function.StaticReturnType(cty.String),
+		Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+			path := args[0].AsString()
+			src, err := readFileBytes(baseDir, path)
+			if err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+
+			h := hf()
+			h.Write(src)
+			rv := enc(h.Sum(nil))
+			return cty.StringVal(rv), nil
+		},
+	})
+}
+
+// UUID generates and returns a Type-4 UUID in the standard hexadecimal string
+// format.
+//
+// This is not a pure function: it will generate a different result for each
+// call. It must therefore be registered as an impure function in the function
+// table in the "lang" package.
+func UUID() (cty.Value, error) {
+	return UUIDFunc.Call(nil)
+}
+
+// Base64Sha256 computes the SHA256 hash of a given string and encodes it with
+// Base64.
+//
+// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied
+// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+func Base64Sha256(str cty.Value) (cty.Value, error) {
+	return Base64Sha256Func.Call([]cty.Value{str})
+}
+
+// Base64Sha512 computes the SHA512 hash of a given string and encodes it with
+// Base64.
+//
+// The given string is first encoded as UTF-8 and then the SHA512 algorithm is applied
+// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+func Base64Sha512(str cty.Value) (cty.Value, error) {
+	return Base64Sha512Func.Call([]cty.Value{str})
+}
+
+// Bcrypt computes a hash of the given string using the Blowfish cipher,
+// returning a string in the Modular Crypt Format
+// usually expected in the shadow password file on many Unix systems.
+func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) {
+	args := make([]cty.Value, len(cost)+1)
+	args[0] = str
+	copy(args[1:], cost)
+	return BcryptFunc.Call(args)
+}
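+
+// Illustrative sketch (not part of the upstream source): calling the Sha256
+// wrapper defined below; the result is the lowercase hex digest of the
+// UTF-8 bytes of the input string.
+func exampleSha256() (cty.Value, error) {
+	return Sha256(cty.StringVal("hello world"))
+}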
+// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits.
+func Md5(str cty.Value) (cty.Value, error) {
+	return Md5Func.Call([]cty.Value{str})
+}
+
+// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding
+// cleartext.
+func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) {
+	return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey})
+}
+
+// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits.
+func Sha1(str cty.Value) (cty.Value, error) {
+	return Sha1Func.Call([]cty.Value{str})
+}
+
+// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits.
+func Sha256(str cty.Value) (cty.Value, error) {
+	return Sha256Func.Call([]cty.Value{str})
+}
+
+// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits.
+func Sha512(str cty.Value) (cty.Value, error) {
+	return Sha512Func.Call([]cty.Value{str})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go b/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go
new file mode 100644
index 0000000000..5dae198774
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go
@@ -0,0 +1,70 @@
+package funcs
+
+import (
+	"time"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// TimestampFunc constructs a function that returns a string representation of the current date and time.
+var TimestampFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	Type:   function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
+	},
+})
+
+// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp.
+var TimeAddFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "timestamp",
+			Type: cty.String,
+		},
+		{
+			Name: "duration",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		ts, err := time.Parse(time.RFC3339, args[0].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		duration, err := time.ParseDuration(args[1].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil
+	},
+})
+
+// Timestamp returns a string representation of the current date and time.
+//
+// In the Terraform language, timestamps are conventionally represented as
+// strings using RFC 3339 "Date and Time format" syntax, and so timestamp
+// returns a string in this format.
+func Timestamp() (cty.Value, error) {
+	return TimestampFunc.Call([]cty.Value{})
+}
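+
+// Illustrative sketch (not part of the upstream source): adding a negative
+// duration with TimeAddFunc above rewinds the timestamp, giving
+// "2019-04-18T10:30:00Z" for this hypothetical input.
+func exampleTimeAdd() (cty.Value, error) {
+	return TimeAddFunc.Call([]cty.Value{
+		cty.StringVal("2019-04-18T11:00:00Z"),
+		cty.StringVal("-30m"),
+	})
+}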
+// TimeAdd adds a duration to a timestamp, returning a new timestamp.
+//
+// In the Terraform language, timestamps are conventionally represented as
+// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires
+// the timestamp argument to be a string conforming to this syntax.
+//
+// `duration` is a string representation of a time difference, consisting of
+// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The accepted
+// units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, and `h`. The first
+// number may be negative to indicate a negative duration, like `"-2h5m"`.
+//
+// The result is a string, also in RFC 3339 format, representing the result
+// of adding the given duration to the given timestamp.
+func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) {
+	return TimeAddFunc.Call([]cty.Value{timestamp, duration})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go b/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go
new file mode 100644
index 0000000000..af93f08dc1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go
@@ -0,0 +1,140 @@
+package funcs
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/base64"
+	"fmt"
+	"log"
+	"net/url"
+	"unicode/utf8"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence.
+var Base64DecodeFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		s := args[0].AsString()
+		sDec, err := base64.StdEncoding.DecodeString(s)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s)
+		}
+		if !utf8.Valid([]byte(sDec)) {
+			log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", sDec)
+			return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8")
+		}
+		return cty.StringVal(string(sDec)), nil
+	},
+})
+
+// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence.
+var Base64EncodeFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil
+	},
+})
+
+// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in
+// Base64 encoding.
+var Base64GzipFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		s := args[0].AsString()
+
+		var b bytes.Buffer
+		gz := gzip.NewWriter(&b)
+		if _, err := gz.Write([]byte(s)); err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s)
+		}
+		if err := gz.Flush(); err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s)
+		}
+		if err := gz.Close(); err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s)
+		}
+		return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil
+	},
+})
+
+// URLEncodeFunc constructs a function that applies URL encoding to a given string.
+var URLEncodeFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(url.QueryEscape(args[0].AsString())), nil
+	},
+})
+
+// Base64Decode decodes a string containing a base64 sequence.
+//
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+//
+// Strings in the Terraform language are sequences of Unicode characters rather
+// than bytes, so this function will also interpret the resulting bytes as
+// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function
+// produces an error.
+func Base64Decode(str cty.Value) (cty.Value, error) {
+	return Base64DecodeFunc.Call([]cty.Value{str})
+}
+
+// Base64Encode applies Base64 encoding to a string.
+//
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+//
+// Strings in the Terraform language are sequences of Unicode characters rather
+// than bytes, so this function will first encode the characters from the string
+// as UTF-8, and then apply Base64 encoding to the result.
+func Base64Encode(str cty.Value) (cty.Value, error) {
+	return Base64EncodeFunc.Call([]cty.Value{str})
+}
+
+// Base64Gzip compresses a string with gzip and then encodes the result in
+// Base64 encoding.
+//
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+//
+// Strings in the Terraform language are sequences of Unicode characters rather
+// than bytes, so this function will first encode the characters from the string
+// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding.
+func Base64Gzip(str cty.Value) (cty.Value, error) {
+	return Base64GzipFunc.Call([]cty.Value{str})
+}
+
+// URLEncode applies URL encoding to a given string.
+//
+// This function identifies characters in the given string that would have a
+// special meaning when included as a query string argument in a URL and
+// escapes them using RFC 3986 "percent encoding".
+//
+// If the given string contains non-ASCII characters, these are first encoded as
+// UTF-8 and then percent encoding is applied separately to each UTF-8 byte.
+func URLEncode(str cty.Value) (cty.Value, error) {
+	return URLEncodeFunc.Call([]cty.Value{str})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go
new file mode 100644
index 0000000000..7dfc905875
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go
@@ -0,0 +1,345 @@
+package funcs
+
+import (
+	"encoding/base64"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	homedir "github.com/mitchellh/go-homedir"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// MakeFileFunc constructs a function that takes a file path and returns the
+// contents of that file, either directly as a string (where valid UTF-8 is
+// required) or as a string containing base64 bytes.
+func MakeFileFunc(baseDir string, encBase64 bool) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "path",
+				Type: cty.String,
+			},
+		},
+		Type: function.StaticReturnType(cty.String),
+		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+			path := args[0].AsString()
+			src, err := readFileBytes(baseDir, path)
+			if err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+
+			switch {
+			case encBase64:
+				enc := base64.StdEncoding.EncodeToString(src)
+				return cty.StringVal(enc), nil
+			default:
+				if !utf8.Valid(src) {
+					return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path)
+				}
+				return cty.StringVal(string(src)), nil
+			}
+		},
+	})
+}
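+
+// Illustrative sketch (not part of the upstream source): reading a
+// hypothetical UTF-8 text file relative to a module directory via the File
+// wrapper defined later in this file; both paths are placeholders.
+func exampleFile() (cty.Value, error) {
+	return File("./modules/example", cty.StringVal("greeting.txt"))
+}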
+// MakeTemplateFileFunc constructs a function that takes a file path and
+// an arbitrary object of named values and attempts to render the referenced
+// file as a template using HCL template syntax.
+//
+// The template itself may recursively call other functions so a callback
+// must be provided to get access to those functions. The template cannot,
+// however, access any variables defined in the scope: it is restricted only to
+// those variables provided in the second function argument, to ensure that all
+// dependencies on other graph nodes can be seen before executing this function.
+//
+// As a special exception, a referenced template file may not recursively call
+// the templatefile function, since that would risk the same file being
+// included into itself indefinitely.
+func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function {
+
+	params := []function.Parameter{
+		{
+			Name: "path",
+			Type: cty.String,
+		},
+		{
+			Name: "vars",
+			Type: cty.DynamicPseudoType,
+		},
+	}
+
+	loadTmpl := func(fn string) (hcl.Expression, error) {
+		// We re-use File here to ensure the same filename interpretation
+		// as it does, along with its other safety checks.
+		tmplVal, err := File(baseDir, cty.StringVal(fn))
+		if err != nil {
+			return nil, err
+		}
+
+		expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1})
+		if diags.HasErrors() {
+			return nil, diags
+		}
+
+		return expr, nil
+	}
+
+	renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) {
+		if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) {
+			return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time
+		}
+
+		ctx := &hcl.EvalContext{
+			Variables: varsVal.AsValueMap(),
+		}
+
+		// We'll pre-check references in the template here so we can give a
+		// more specialized error message than HCL would by default, so it's
+		// clearer that this problem is coming from a templatefile call.
+		for _, traversal := range expr.Variables() {
+			root := traversal.RootName()
+			if _, ok := ctx.Variables[root]; !ok {
+				return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange())
+			}
+		}
+
+		givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems
+		funcs := make(map[string]function.Function, len(givenFuncs))
+		for name, fn := range givenFuncs {
+			if name == "templatefile" {
+				// We stub this one out to prevent recursive calls.
+				funcs[name] = function.New(&function.Spec{
+					Params: params,
+					Type: func(args []cty.Value) (cty.Type, error) {
+						return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call")
+					},
+				})
+				continue
+			}
+			funcs[name] = fn
+		}
+		ctx.Functions = funcs
+
+		val, diags := expr.Value(ctx)
+		if diags.HasErrors() {
+			return cty.DynamicVal, diags
+		}
+		return val, nil
+	}
+
+	return function.New(&function.Spec{
+		Params: params,
+		Type: func(args []cty.Value) (cty.Type, error) {
+			if !(args[0].IsKnown() && args[1].IsKnown()) {
+				return cty.DynamicPseudoType, nil
+			}
+
+			// We'll render our template now to see what result type it produces.
+			// A template consisting only of a single interpolation can potentially
+			// return any type.
+			expr, err := loadTmpl(args[0].AsString())
+			if err != nil {
+				return cty.DynamicPseudoType, err
+			}
+
+			// This is safe even if args[1] contains unknowns because the HCL
+			// template renderer itself knows how to short-circuit those.
+			val, err := renderTmpl(expr, args[1])
+			return val.Type(), err
+		},
+		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+			expr, err := loadTmpl(args[0].AsString())
+			if err != nil {
+				return cty.DynamicVal, err
+			}
+			return renderTmpl(expr, args[1])
+		},
+	})
+
+}
+
+// MakeFileExistsFunc constructs a function that takes a path
+// and determines whether a file exists at that path.
+func MakeFileExistsFunc(baseDir string) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "path",
+				Type: cty.String,
+			},
+		},
+		Type: function.StaticReturnType(cty.Bool),
+		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+			path := args[0].AsString()
+			path, err := homedir.Expand(path)
+			if err != nil {
+				return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err)
+			}
+
+			if !filepath.IsAbs(path) {
+				path = filepath.Join(baseDir, path)
+			}
+
+			// Ensure that the path is canonical for the host OS
+			path = filepath.Clean(path)
+
+			fi, err := os.Stat(path)
+			if err != nil {
+				if os.IsNotExist(err) {
+					return cty.False, nil
+				}
+				return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path)
+			}
+
+			if fi.Mode().IsRegular() {
+				return cty.True, nil
+			}
+
+			return cty.False, fmt.Errorf("%s is not a regular file, but %q",
+				path, fi.Mode().String())
+		},
+	})
+}
+
+// BasenameFunc constructs a function that takes a string containing a filesystem path
+// and removes all except the last portion from it.
+var BasenameFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "path",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(filepath.Base(args[0].AsString())), nil
+	},
+})
+
+// DirnameFunc constructs a function that takes a string containing a filesystem path
+// and removes the last portion from it.
+var DirnameFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "path",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(filepath.Dir(args[0].AsString())), nil
+	},
+})
+
+// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory.
+var PathExpandFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "path",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		homePath, err := homedir.Expand(args[0].AsString())
+		return cty.StringVal(homePath), err
+	},
+})
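+
+// Illustrative sketch (not part of the upstream source): probing for a
+// hypothetical relative path with the FileExists wrapper defined later in
+// this file; the result is cty.False when nothing exists at the resolved
+// location.
+func exampleFileExists() (cty.Value, error) {
+	return FileExists("/tmp", cty.StringVal("does-not-exist.txt"))
+}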
+func readFileBytes(baseDir, path string) ([]byte, error) {
+	path, err := homedir.Expand(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to expand ~: %s", err)
+	}
+
+	if !filepath.IsAbs(path) {
+		path = filepath.Join(baseDir, path)
+	}
+
+	// Ensure that the path is canonical for the host OS
+	path = filepath.Clean(path)
+
+	src, err := ioutil.ReadFile(path)
+	if err != nil {
+		// ReadFile does not return Terraform-user-friendly error
+		// messages, so we'll provide our own.
+		if os.IsNotExist(err) {
+			return nil, fmt.Errorf("no file exists at %s", path)
+		}
+		return nil, fmt.Errorf("failed to read %s", path)
+	}
+
+	return src, nil
+}
+
+// File reads the contents of the file at the given path.
+//
+// The file must contain valid UTF-8 bytes, or this function will return an error.
+//
+// The underlying function implementation works relative to a particular base
+// directory, so this wrapper takes a base directory string and uses it to
+// construct the underlying function before calling it.
+func File(baseDir string, path cty.Value) (cty.Value, error) {
+	fn := MakeFileFunc(baseDir, false)
+	return fn.Call([]cty.Value{path})
+}
+
+// FileExists determines whether a file exists at the given path.
+//
+// The underlying function implementation works relative to a particular base
+// directory, so this wrapper takes a base directory string and uses it to
+// construct the underlying function before calling it.
+func FileExists(baseDir string, path cty.Value) (cty.Value, error) {
+	fn := MakeFileExistsFunc(baseDir)
+	return fn.Call([]cty.Value{path})
+}
+
+// FileBase64 reads the contents of the file at the given path.
+//
+// The bytes from the file are encoded as base64 before returning.
+//
+// The underlying function implementation works relative to a particular base
+// directory, so this wrapper takes a base directory string and uses it to
+// construct the underlying function before calling it.
+func FileBase64(baseDir string, path cty.Value) (cty.Value, error) {
+	fn := MakeFileFunc(baseDir, true)
+	return fn.Call([]cty.Value{path})
+}
+
+// Basename takes a string containing a filesystem path and removes all except the last portion from it.
+//
+// The underlying function implementation works only with the path string and does not access the filesystem itself.
+// It is therefore unable to take into account filesystem features such as symlinks.
+//
+// If the path is empty then the result is ".", representing the current working directory.
+func Basename(path cty.Value) (cty.Value, error) {
+	return BasenameFunc.Call([]cty.Value{path})
+}
+
+// Dirname takes a string containing a filesystem path and removes the last portion from it.
+//
+// The underlying function implementation works only with the path string and does not access the filesystem itself.
+// It is therefore unable to take into account filesystem features such as symlinks.
+//
+// If the path is empty then the result is ".", representing the current working directory.
+func Dirname(path cty.Value) (cty.Value, error) {
+	return DirnameFunc.Call([]cty.Value{path})
+}
+
+// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with
+// the current user's home directory path.
+//
+// The underlying function implementation works only with the path string and does not access the filesystem itself.
+// It is therefore unable to take into account filesystem features such as symlinks.
+//
+// If the leading segment in the path is not `~` then the given path is returned unmodified.
+func Pathexpand(path cty.Value) (cty.Value, error) {
+	return PathExpandFunc.Call([]cty.Value{path})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/number.go b/vendor/github.com/hashicorp/terraform/lang/funcs/number.go
new file mode 100644
index 0000000000..15cfe7182a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/number.go
@@ -0,0 +1,155 @@
+package funcs
+
+import (
+	"math"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// CeilFunc constructs a function that returns the closest whole number greater
+// than or equal to the given value.
+var CeilFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var val float64
+		if err := gocty.FromCtyValue(args[0], &val); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		return cty.NumberIntVal(int64(math.Ceil(val))), nil
+	},
+})
+
+// FloorFunc constructs a function that returns the closest whole number lesser
+// than or equal to the given value.
+var FloorFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var val float64
+		if err := gocty.FromCtyValue(args[0], &val); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		return cty.NumberIntVal(int64(math.Floor(val))), nil
+	},
+})
+
+// LogFunc constructs a function that returns the logarithm of a given number in a given base.
+var LogFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+		{
+			Name: "base",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num float64
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		var base float64
+		if err := gocty.FromCtyValue(args[1], &base); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil
+	},
+})
+
+// PowFunc constructs a function that returns the given number raised to the given power.
+var PowFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+		{
+			Name: "power",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num float64
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		var power float64
+		if err := gocty.FromCtyValue(args[1], &power); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.NumberFloatVal(math.Pow(num, power)), nil
+	},
+})
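+
+// Illustrative sketch (not part of the upstream source): LogFunc divides
+// natural logarithms, so log(256, 2) is 8 (up to floating-point rounding),
+// while PowFunc raises, so pow(2, 10) is 1024. Uses the Log and Pow
+// wrappers defined later in this file.
+func exampleLogPow() (cty.Value, cty.Value, error) {
+	logVal, err := Log(cty.NumberIntVal(256), cty.NumberIntVal(2))
+	if err != nil {
+		return cty.NilVal, cty.NilVal, err
+	}
+	powVal, err := Pow(cty.NumberIntVal(2), cty.NumberIntVal(10))
+	return logVal, powVal, err
+}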
+var SignumFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num int
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		switch {
+		case num < 0:
+			return cty.NumberIntVal(-1), nil
+		case num > 0:
+			return cty.NumberIntVal(+1), nil
+		default:
+			return cty.NumberIntVal(0), nil
+		}
+	},
+})
+
+// Ceil returns the closest whole number greater than or equal to the given value.
+func Ceil(num cty.Value) (cty.Value, error) {
+	return CeilFunc.Call([]cty.Value{num})
+}
+
+// Floor returns the closest whole number less than or equal to the given value.
+func Floor(num cty.Value) (cty.Value, error) {
+	return FloorFunc.Call([]cty.Value{num})
+}
+
+// Log returns the logarithm of a given number in a given base.
+func Log(num, base cty.Value) (cty.Value, error) {
+	return LogFunc.Call([]cty.Value{num, base})
+}
+
+// Pow returns the given number raised to the given power.
+func Pow(num, power cty.Value) (cty.Value, error) {
+	return PowFunc.Call([]cty.Value{num, power})
+}
+
+// Signum determines the sign of a number, returning a number between -1 and
+// 1 to represent the sign.
+func Signum(num cty.Value) (cty.Value, error) {
+	return SignumFunc.Call([]cty.Value{num})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/string.go b/vendor/github.com/hashicorp/terraform/lang/funcs/string.go
new file mode 100644
index 0000000000..c9ddf19e36
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/string.go
@@ -0,0 +1,280 @@
+package funcs
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// JoinFunc constructs a function that concatenates the string elements of
+// one or more lists with a given separator.
+var JoinFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "separator",
+			Type: cty.String,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name: "lists",
+		Type: cty.List(cty.String),
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		sep := args[0].AsString()
+		listVals := args[1:]
+		if len(listVals) < 1 {
+			return cty.UnknownVal(cty.String), fmt.Errorf("at least one list is required")
+		}
+
+		l := 0
+		for _, list := range listVals {
+			if !list.IsWhollyKnown() {
+				return cty.UnknownVal(cty.String), nil
+			}
+			l += list.LengthInt()
+		}
+
+		items := make([]string, 0, l)
+		for ai, list := range listVals {
+			ei := 0
+			for it := list.ElementIterator(); it.Next(); {
+				_, val := it.Element()
+				if val.IsNull() {
+					if len(listVals) > 1 {
+						return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d of list %d is null; cannot concatenate null values", ei, ai+1)
+					}
+					return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d is null; cannot concatenate null values", ei)
+				}
+				items = append(items, val.AsString())
+				ei++
+			}
+		}
+
+		return cty.StringVal(strings.Join(items, sep)), nil
+	},
+})
+
+// SortFunc constructs a function that re-orders the elements of a given list
+// of strings so that they are in ascending lexicographical order.
+var SortFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "list",
+			Type: cty.List(cty.String),
+		},
+	},
+	Type: function.StaticReturnType(cty.List(cty.String)),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		listVal := args[0]
+
+		if !listVal.IsWhollyKnown() {
+			// If some of the element values aren't known yet then we
+			// can't yet predict the order of the result.
+			return cty.UnknownVal(retType), nil
+		}
+		if listVal.LengthInt() == 0 { // Easy path
+			return listVal, nil
+		}
+
+		list := make([]string, 0, listVal.LengthInt())
+		for it := listVal.ElementIterator(); it.Next(); {
+			iv, v := it.Element()
+			if v.IsNull() {
+				return cty.UnknownVal(retType), fmt.Errorf("given list element %s is null; a null string cannot be sorted", iv.AsBigFloat().String())
+			}
+			list = append(list, v.AsString())
+		}
+
+		sort.Strings(list)
+		retVals := make([]cty.Value, len(list))
+		for i, s := range list {
+			retVals[i] = cty.StringVal(s)
+		}
+		return cty.ListVal(retVals), nil
+	},
+})
+
+// SplitFunc constructs a function that divides a given string by a given
+// separator, returning a list of strings containing the characters between
+// the separator sequences.
+var SplitFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "separator",
+			Type: cty.String,
+		},
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.List(cty.String)),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		sep := args[0].AsString()
+		str := args[1].AsString()
+		elems := strings.Split(str, sep)
+		elemVals := make([]cty.Value, len(elems))
+		for i, s := range elems {
+			elemVals[i] = cty.StringVal(s)
+		}
+		if len(elemVals) == 0 {
+			return cty.ListValEmpty(cty.String), nil
+		}
+		return cty.ListVal(elemVals), nil
+	},
+})
+
+// ChompFunc constructs a function that removes newline characters at the end of a string.
+var ChompFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
+		return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil
+	},
+})
+
+// IndentFunc constructs a function that adds a given number of spaces to the
+// beginnings of all but the first line in a given multi-line string.
+var IndentFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "spaces",
+			Type: cty.Number,
+		},
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var spaces int
+		if err := gocty.FromCtyValue(args[0], &spaces); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		data := args[1].AsString()
+		pad := strings.Repeat(" ", spaces)
+		return cty.StringVal(strings.Replace(data, "\n", "\n"+pad, -1)), nil
+	},
+})
+
+// ReplaceFunc constructs a function that searches a given string for another
+// given substring, and replaces each occurrence with a given replacement string.
+var ReplaceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+		{
+			Name: "substr",
+			Type: cty.String,
+		},
+		{
+			Name: "replace",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		str := args[0].AsString()
+		substr := args[1].AsString()
+		replace := args[2].AsString()
+
+		// We search/replace using a regexp if the string is surrounded
+		// in forward slashes.
+		if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' {
+			re, err := regexp.Compile(substr[1 : len(substr)-1])
+			if err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+
+			return cty.StringVal(re.ReplaceAllString(str, replace)), nil
+		}
+
+		return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil
+	},
+})
+
+// TitleFunc constructs a function that converts the first letter of each word
+// in the given string to uppercase.
+var TitleFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return cty.StringVal(strings.Title(args[0].AsString())), nil
+	},
+})
+
+// TrimSpaceFunc constructs a function that removes any space characters from
+// the start and end of the given string.
+var TrimSpaceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil
+	},
+})
+
+// Join concatenates together the string elements of one or more lists with a
+// given separator.
+func Join(sep cty.Value, lists ...cty.Value) (cty.Value, error) {
+	args := make([]cty.Value, len(lists)+1)
+	args[0] = sep
+	copy(args[1:], lists)
+	return JoinFunc.Call(args)
+}
+
+// Sort re-orders the elements of a given list of strings so that they are
+// in ascending lexicographical order.
+func Sort(list cty.Value) (cty.Value, error) {
+	return SortFunc.Call([]cty.Value{list})
+}
+
+// Split divides a given string by a given separator, returning a list of
+// strings containing the characters between the separator sequences.
+func Split(sep, str cty.Value) (cty.Value, error) {
+	return SplitFunc.Call([]cty.Value{sep, str})
+}
+
+// Chomp removes newline characters at the end of a string.
+func Chomp(str cty.Value) (cty.Value, error) {
+	return ChompFunc.Call([]cty.Value{str})
+}
+
+// Indent adds a given number of spaces to the beginnings of all but the first
+// line in a given multi-line string.
+func Indent(spaces, str cty.Value) (cty.Value, error) {
+	return IndentFunc.Call([]cty.Value{spaces, str})
+}
+
+// Replace searches a given string for another given substring,
+// and replaces all occurrences with a given replacement string.
+func Replace(str, substr, replace cty.Value) (cty.Value, error) {
+	return ReplaceFunc.Call([]cty.Value{str, substr, replace})
+}
+
+// Title converts the first letter of each word in the given string to uppercase.
+func Title(str cty.Value) (cty.Value, error) {
+	return TitleFunc.Call([]cty.Value{str})
+}
+
+// TrimSpace removes any space characters from the start and end of the given string.
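+//
+// For example, "  hello\n" becomes "hello"; interior whitespace is untouched.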
+func TrimSpace(str cty.Value) (cty.Value, error) {
+	return TrimSpaceFunc.Call([]cty.Value{str})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/functions.go b/vendor/github.com/hashicorp/terraform/lang/functions.go
new file mode 100644
index 0000000000..bcda0e2966
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/functions.go
@@ -0,0 +1,147 @@
+package lang
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/function/stdlib"
+
+	"github.com/hashicorp/terraform/lang/funcs"
+)
+
+var impureFunctions = []string{
+	"bcrypt",
+	"timestamp",
+	"uuid",
+}
+
+// Functions returns the set of functions that should be used when evaluating
+// expressions in the receiving scope.
+func (s *Scope) Functions() map[string]function.Function {
+	s.funcsLock.Lock()
+	if s.funcs == nil {
+		// Some of our functions are just directly the cty stdlib functions.
+		// Others are implemented in the subdirectory "funcs" here in this
+		// repository. New functions should generally start out their lives
+		// in the "funcs" directory and potentially graduate to cty stdlib
+		// later if the functionality seems to be something domain-agnostic
+		// that would be useful to all applications using cty functions.
+
+		s.funcs = map[string]function.Function{
+			"abs":              stdlib.AbsoluteFunc,
+			"basename":         funcs.BasenameFunc,
+			"base64decode":     funcs.Base64DecodeFunc,
+			"base64encode":     funcs.Base64EncodeFunc,
+			"base64gzip":       funcs.Base64GzipFunc,
+			"base64sha256":     funcs.Base64Sha256Func,
+			"base64sha512":     funcs.Base64Sha512Func,
+			"bcrypt":           funcs.BcryptFunc,
+			"ceil":             funcs.CeilFunc,
+			"chomp":            funcs.ChompFunc,
+			"cidrhost":         funcs.CidrHostFunc,
+			"cidrnetmask":      funcs.CidrNetmaskFunc,
+			"cidrsubnet":       funcs.CidrSubnetFunc,
+			"coalesce":         funcs.CoalesceFunc,
+			"coalescelist":     funcs.CoalesceListFunc,
+			"compact":          funcs.CompactFunc,
+			"concat":           stdlib.ConcatFunc,
+			"contains":         funcs.ContainsFunc,
+			"csvdecode":        stdlib.CSVDecodeFunc,
+			"dirname":          funcs.DirnameFunc,
+			"distinct":         funcs.DistinctFunc,
+			"element":          funcs.ElementFunc,
+			"chunklist":        funcs.ChunklistFunc,
+			"file":             funcs.MakeFileFunc(s.BaseDir, false),
+			"fileexists":       funcs.MakeFileExistsFunc(s.BaseDir),
+			"filebase64":       funcs.MakeFileFunc(s.BaseDir, true),
+			"filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir),
+			"filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir),
+			"filemd5":          funcs.MakeFileMd5Func(s.BaseDir),
+			"filesha1":         funcs.MakeFileSha1Func(s.BaseDir),
+			"filesha256":       funcs.MakeFileSha256Func(s.BaseDir),
+			"filesha512":       funcs.MakeFileSha512Func(s.BaseDir),
+			"flatten":          funcs.FlattenFunc,
+			"floor":            funcs.FloorFunc,
+			"format":           stdlib.FormatFunc,
+			"formatdate":       stdlib.FormatDateFunc,
+			"formatlist":       stdlib.FormatListFunc,
+			"indent":           funcs.IndentFunc,
+			"index":            funcs.IndexFunc,
+			"join":             funcs.JoinFunc,
+			"jsondecode":       stdlib.JSONDecodeFunc,
+			"jsonencode":       stdlib.JSONEncodeFunc,
+			"keys":             funcs.KeysFunc,
+			"length":           funcs.LengthFunc,
+			"list":             funcs.ListFunc,
+			"log":              funcs.LogFunc,
+			"lookup":           funcs.LookupFunc,
+			"lower":            stdlib.LowerFunc,
+			"map":              funcs.MapFunc,
+			"matchkeys":        funcs.MatchkeysFunc,
+			"max":              stdlib.MaxFunc,
+			"md5":              funcs.Md5Func,
+			"merge":            funcs.MergeFunc,
+			"min":              stdlib.MinFunc,
+			"pathexpand":       funcs.PathExpandFunc,
+			"pow":              funcs.PowFunc,
+			"replace":          funcs.ReplaceFunc,
+			"reverse":          funcs.ReverseFunc,
+			"rsadecrypt":       funcs.RsaDecryptFunc,
+			"sethaselement":    stdlib.SetHasElementFunc,
+			"setintersection":  stdlib.SetIntersectionFunc,
+			"setproduct":       funcs.SetProductFunc,
+			"setunion":         stdlib.SetUnionFunc,
+			"sha1":             funcs.Sha1Func,
+			"sha256":           funcs.Sha256Func,
+			"sha512":           funcs.Sha512Func,
+			"signum":           funcs.SignumFunc,
+			"slice":            funcs.SliceFunc,
+			"sort":             funcs.SortFunc,
+			"split":            funcs.SplitFunc,
+			"substr":           stdlib.SubstrFunc,
+			"timestamp":        funcs.TimestampFunc,
+			"timeadd":          funcs.TimeAddFunc,
+			"title":            funcs.TitleFunc,
+			"tostring":         funcs.MakeToFunc(cty.String),
+			"tonumber":         funcs.MakeToFunc(cty.Number),
+			"tobool":           funcs.MakeToFunc(cty.Bool),
+			"toset":            funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)),
+			"tolist":           funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)),
+			"tomap":            funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)),
+			"transpose":        funcs.TransposeFunc,
+			"trimspace":        funcs.TrimSpaceFunc,
+			"upper":            stdlib.UpperFunc,
+			"urlencode":        funcs.URLEncodeFunc,
+			"uuid":             funcs.UUIDFunc,
+			"values":           funcs.ValuesFunc,
+			"zipmap":           funcs.ZipmapFunc,
+		}
+
+		s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function {
+			// The templatefile function prevents recursive calls to itself
+			// by copying this map and overwriting the "templatefile" entry.
+			return s.funcs
+		})
+
+		if s.PureOnly {
+			// Force our few impure functions to return unknown so that we
+			// can defer evaluating them until a later pass.
+			for _, name := range impureFunctions {
+				s.funcs[name] = function.Unpredictable(s.funcs[name])
+			}
+		}
+	}
+	s.funcsLock.Unlock()
+
+	return s.funcs
+}
+
+var unimplFunc = function.New(&function.Spec{
+	Type: func([]cty.Value) (cty.Type, error) {
+		return cty.DynamicPseudoType, fmt.Errorf("function not yet implemented")
+	},
+	Impl: func([]cty.Value, cty.Type) (cty.Value, error) {
+		return cty.DynamicVal, fmt.Errorf("function not yet implemented")
+	},
+})
diff --git a/vendor/github.com/hashicorp/terraform/lang/references.go b/vendor/github.com/hashicorp/terraform/lang/references.go
new file mode 100644
index 0000000000..d688477aac
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/references.go
@@ -0,0 +1,81 @@
+package lang
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/lang/blocktoattr"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// References finds all of the references in the given set of traversals,
+// returning diagnostics if any of the traversals cannot be interpreted as a
+// reference.
+//
+// This function does not do any de-duplication of references, since references
+// have source location information embedded in them and so any invalid
+// references that are duplicated should have errors reported for each
+// occurrence.
+//
+// If the returned diagnostics contains errors then the result may be
+// incomplete or invalid. Otherwise, the returned slice has one reference per
+// given traversal, though it is not guaranteed that the references will
+// appear in the same order as the given traversals.
+func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) { + if len(traversals) == 0 { + return nil, nil + } + + var diags tfdiags.Diagnostics + refs := make([]*addrs.Reference, 0, len(traversals)) + + for _, traversal := range traversals { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if ref == nil { + continue + } + refs = append(refs, ref) + } + + return refs, diags +} + +// ReferencesInBlock is a helper wrapper around References that first searches +// the given body for traversals, before converting those traversals to +// references. +// +// A block schema must be provided so that this function can determine where in +// the body variables are expected. +func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) { + if body == nil { + return nil, nil + } + + // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or + // dynblock.VariablesHCLDec here because when we evaluate a block we'll + // first apply the dynamic block extension and _then_ the blocktoattr + // transform, and so blocktoattr.ExpandedVariables takes into account + // both of those transforms when it analyzes the body to ensure we find + // all of the references as if they'd already moved into their final + // locations, even though we can't expand dynamic blocks yet until we + // already know which variables are required. + // + // The set of cases we want to detect here is covered by the tests for + // the plan graph builder in the main 'terraform' package, since it's + // in a better position to test this due to having mock providers etc + // available. + traversals := blocktoattr.ExpandedVariables(body, schema) + return References(traversals) +} + +// ReferencesInExpr is a helper wrapper around References that first searches +// the given expression for traversals, before converting those traversals +// to references. +func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) { + if expr == nil { + return nil, nil + } + traversals := expr.Variables() + return References(traversals) +} diff --git a/vendor/github.com/hashicorp/terraform/lang/scope.go b/vendor/github.com/hashicorp/terraform/lang/scope.go new file mode 100644 index 0000000000..98fca6baad --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/scope.go @@ -0,0 +1,34 @@ +package lang + +import ( + "sync" + + "github.com/zclconf/go-cty/cty/function" + + "github.com/hashicorp/terraform/addrs" +) + +// Scope is the main type in this package, allowing dynamic evaluation of +// blocks and expressions based on some contextual information that informs +// which variables and functions will be available. +type Scope struct { + // Data is used to resolve references in expressions. + Data Data + + // SelfAddr is the address that the "self" object should be an alias of, + // or nil if the "self" object should not be available at all. + SelfAddr addrs.Referenceable + + // BaseDir is the base directory used by any interpolation functions that + // accept filesystem paths as arguments. + BaseDir string + + // PureOnly can be set to true to request that any non-pure functions + // produce unknown value results rather than actually executing. This is + // important during a plan phase to avoid generating results that could + // then differ during apply. 
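+	//
+	// (The functions affected are those listed in impureFunctions in
+	// functions.go: bcrypt, timestamp, and uuid.)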
+	PureOnly bool
+
+	funcs     map[string]function.Function
+	funcsLock sync.Mutex
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/action.go b/vendor/github.com/hashicorp/terraform/plans/action.go
new file mode 100644
index 0000000000..c3e6a32aee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/action.go
@@ -0,0 +1,22 @@
+package plans
+
+type Action rune
+
+const (
+	NoOp             Action = 0
+	Create           Action = '+'
+	Read             Action = '←'
+	Update           Action = '~'
+	DeleteThenCreate Action = '∓'
+	CreateThenDelete Action = '±'
+	Delete           Action = '-'
+)
+
+//go:generate stringer -type Action
+
+// IsReplace returns true if the action is one of the two actions that
+// represents replacing an existing object with a new object:
+// DeleteThenCreate or CreateThenDelete.
+func (a Action) IsReplace() bool {
+	return a == DeleteThenCreate || a == CreateThenDelete
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/action_string.go b/vendor/github.com/hashicorp/terraform/plans/action_string.go
new file mode 100644
index 0000000000..c79465a2ae
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/action_string.go
@@ -0,0 +1,36 @@
+// Code generated by "stringer -type Action"; DO NOT EDIT.
+
+package plans
+
+import "strconv"
+
+const (
+	_Action_name_0 = "NoOp"
+	_Action_name_1 = "Create"
+	_Action_name_2 = "Delete"
+	_Action_name_3 = "Update"
+	_Action_name_4 = "CreateThenDelete"
+	_Action_name_5 = "Read"
+	_Action_name_6 = "DeleteThenCreate"
+)
+
+func (i Action) String() string {
+	switch {
+	case i == 0:
+		return _Action_name_0
+	case i == 43:
+		return _Action_name_1
+	case i == 45:
+		return _Action_name_2
+	case i == 126:
+		return _Action_name_3
+	case i == 177:
+		return _Action_name_4
+	case i == 8592:
+		return _Action_name_5
+	case i == 8723:
+		return _Action_name_6
+	default:
+		return "Action(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes.go b/vendor/github.com/hashicorp/terraform/plans/changes.go
new file mode 100644
index 0000000000..d7e0dcdb89
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/changes.go
@@ -0,0 +1,308 @@
+package plans
+
+import (
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/states"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Changes describes various actions that Terraform will attempt to take if
+// the corresponding plan is applied.
+//
+// A Changes object can be rendered into a visual diff (by the caller, using
+// code in another package) for display to the user.
+type Changes struct {
+	// Resources tracks planned changes to resource instance objects.
+	Resources []*ResourceInstanceChangeSrc
+
+	// Outputs tracks planned changes to output values.
+	//
+	// Note that although an in-memory plan contains planned changes for
+	// outputs throughout the configuration, a plan serialized
+	// to disk retains only the root outputs because they are
+	// externally-visible, while other outputs are implementation details and
+	// can be easily re-calculated during the apply phase. Therefore only root
+	// module outputs will survive a round-trip through a plan file.
+	Outputs []*OutputChangeSrc
+}
+
+// NewChanges returns a valid Changes object that describes no changes.
+func NewChanges() *Changes {
+	return &Changes{}
+}
+
+func (c *Changes) Empty() bool {
+	for _, res := range c.Resources {
+		if res.Action != NoOp {
+			return false
+		}
+	}
+	return true
+}
+
+// ResourceInstance returns the planned change for the current object of the
+// resource instance of the given address, if any. Returns nil if no change is
+// planned.
+func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc {
+	addrStr := addr.String()
+	for _, rc := range c.Resources {
+		if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed {
+			return rc
+		}
+	}
+
+	return nil
+}
+
+// ResourceInstanceDeposed returns the plan change of a deposed object of
+// the resource instance of the given address, if any. Returns nil if no change
+// is planned.
+func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc {
+	addrStr := addr.String()
+	for _, rc := range c.Resources {
+		if rc.Addr.String() == addrStr && rc.DeposedKey == key {
+			return rc
+		}
+	}
+
+	return nil
+}
+
+// OutputValue returns the planned change for the output value with the
+// given address, if any. Returns nil if no change is planned.
+func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc {
+	addrStr := addr.String()
+	for _, oc := range c.Outputs {
+		if oc.Addr.String() == addrStr {
+			return oc
+		}
+	}
+
+	return nil
+}
+
+// SyncWrapper returns a wrapper object around the receiver that can be used
+// to make certain changes to the receiver in a concurrency-safe way, as long
+// as all callers share the same wrapper object.
+func (c *Changes) SyncWrapper() *ChangesSync {
+	return &ChangesSync{
+		changes: c,
+	}
+}
+
+// ResourceInstanceChange describes a change to a particular resource instance
+// object.
+type ResourceInstanceChange struct {
+	// Addr is the absolute address of the resource instance that the change
+	// will apply to.
+	Addr addrs.AbsResourceInstance
+
+	// DeposedKey is the identifier for a deposed object associated with the
+	// given instance, or states.NotDeposed if this change applies to the
+	// current object.
+	//
+	// A Replace change for a resource with create_before_destroy set will
+	// create a new DeposedKey temporarily during replacement. In that case,
+	// DeposedKey in the plan is always states.NotDeposed, representing that
+	// the current object is being replaced with the deposed.
+	DeposedKey states.DeposedKey
+
+	// ProviderAddr is the address of the provider configuration that was used
+	// to plan this change, and thus the configuration that must also be
+	// used to apply it.
+	ProviderAddr addrs.AbsProviderConfig
+
+	// Change is an embedded description of the change.
+	Change
+
+	// RequiredReplace is a set of paths that caused the change action to be
+	// Replace rather than Update. Always nil if the change action is not
+	// Replace.
+	//
+	// This is retained only for UI-plan-rendering purposes and so it does not
+	// currently survive a round-trip through a saved plan file.
+	RequiredReplace cty.PathSet
+
+	// Private allows a provider to stash any extra data that is opaque to
+	// Terraform that relates to this change. Terraform will save this
+	// byte-for-byte and return it to the provider in the apply call.
+	Private []byte
+}
+
+// Encode produces a variant of the receiver that has its change values
+// serialized so it can be written to a plan file. Pass the implied type of the
+// corresponding resource type schema for correct operation.
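+//
+// (Typically this is the cty.Type produced by the schema's ImpliedType
+// method; an inconsistent type will cause the underlying encoding to fail.)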
+func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) {
+	cs, err := rc.Change.Encode(ty)
+	if err != nil {
+		return nil, err
+	}
+	return &ResourceInstanceChangeSrc{
+		Addr:            rc.Addr,
+		DeposedKey:      rc.DeposedKey,
+		ProviderAddr:    rc.ProviderAddr,
+		ChangeSrc:       *cs,
+		RequiredReplace: rc.RequiredReplace,
+		Private:         rc.Private,
+	}, err
+}
+
+// Simplify will, where possible, produce a change with a simpler action than
+// the receiver given a flag indicating whether the caller is dealing with
+// a normal apply or a destroy. This flag deals with the fact that Terraform
+// Core uses a specialized graph node type for destroying; only that
+// specialized node should set "destroying" to true.
+//
+// The following table shows the simplification behavior:
+//
+//	Action    Destroying?   New Action
+//	--------+-------------+-----------
+//	Create    true          NoOp
+//	Delete    false         NoOp
+//	Replace   true          Delete
+//	Replace   false         Create
+//
+// For any combination not in the above table, Simplify just returns the
+// receiver as-is.
+func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange {
+	if destroying {
+		switch rc.Action {
+		case Delete:
+			// We'll fall out and just return rc verbatim, then.
+		case CreateThenDelete, DeleteThenCreate:
+			return &ResourceInstanceChange{
+				Addr:         rc.Addr,
+				DeposedKey:   rc.DeposedKey,
+				Private:      rc.Private,
+				ProviderAddr: rc.ProviderAddr,
+				Change: Change{
+					Action: Delete,
+					Before: rc.Before,
+					After:  cty.NullVal(rc.Before.Type()),
+				},
+			}
+		default:
+			return &ResourceInstanceChange{
+				Addr:         rc.Addr,
+				DeposedKey:   rc.DeposedKey,
+				Private:      rc.Private,
+				ProviderAddr: rc.ProviderAddr,
+				Change: Change{
+					Action: NoOp,
+					Before: rc.Before,
+					After:  rc.Before,
+				},
+			}
+		}
+	} else {
+		switch rc.Action {
+		case Delete:
+			return &ResourceInstanceChange{
+				Addr:         rc.Addr,
+				DeposedKey:   rc.DeposedKey,
+				Private:      rc.Private,
+				ProviderAddr: rc.ProviderAddr,
+				Change: Change{
+					Action: NoOp,
+					Before: rc.Before,
+					After:  rc.Before,
+				},
+			}
+		case CreateThenDelete, DeleteThenCreate:
+			return &ResourceInstanceChange{
+				Addr:         rc.Addr,
+				DeposedKey:   rc.DeposedKey,
+				Private:      rc.Private,
+				ProviderAddr: rc.ProviderAddr,
+				Change: Change{
+					Action: Create,
+					Before: cty.NullVal(rc.After.Type()),
+					After:  rc.After,
+				},
+			}
+		}
+	}
+
+	// If we fall out here then our change is already simple enough.
+	return rc
+}
+
+// OutputChange describes a change to an output value.
+type OutputChange struct {
+	// Addr is the absolute address of the output value that the change
+	// will apply to.
+	Addr addrs.AbsOutputValue
+
+	// Change is an embedded description of the change.
+	//
+	// For output value changes, the type constraint for the DynamicValue
+	// instances is always cty.DynamicPseudoType.
+	Change
+
+	// Sensitive, if true, indicates that either the old or new value in the
+	// change is sensitive and so a rendered version of the plan in the UI
+	// should elide the actual values while still indicating the action of the
+	// change.
+	Sensitive bool
+}
+
+// Encode produces a variant of the receiver that has its change values
+// serialized so it can be written to a plan file.
+func (oc *OutputChange) Encode() (*OutputChangeSrc, error) {
+	cs, err := oc.Change.Encode(cty.DynamicPseudoType)
+	if err != nil {
+		return nil, err
+	}
+	return &OutputChangeSrc{
+		Addr:      oc.Addr,
+		ChangeSrc: *cs,
+		Sensitive: oc.Sensitive,
+	}, err
+}
+
+// Change describes a single change with a given action.
+type Change struct {
+	// Action defines what kind of change is being made.
+	Action Action
+
+	// Interpretation of Before and After depend on Action:
+	//
+	//	NoOp     Before and After are the same, unchanged value
+	//	Create   Before is nil, and After is the expected value after create.
+	//	Read     Before is any prior value (nil if no prior), and After is the
+	//	         value that was or will be read.
+	//	Update   Before is the value prior to update, and After is the expected
+	//	         value after update.
+	//	Replace  As with Update.
+	//	Delete   Before is the value prior to delete, and After is always nil.
+	//
+	// Unknown values may appear anywhere within the Before and After values,
+	// either as the values themselves or as nested elements within known
+	// collections/structures.
+	Before, After cty.Value
}
+
+// Encode produces a variant of the receiver that has its change values
+// serialized so it can be written to a plan file. Pass the type constraint
+// that the values are expected to conform to; to properly decode the values
+// later an identical type constraint must be provided at that time.
+//
+// Where a Change is embedded in some other struct, it's generally better
+// to call the corresponding Encode method of that struct rather than working
+// directly with its embedded Change.
+func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) {
+	beforeDV, err := NewDynamicValue(c.Before, ty)
+	if err != nil {
+		return nil, err
+	}
+	afterDV, err := NewDynamicValue(c.After, ty)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ChangeSrc{
+		Action: c.Action,
+		Before: beforeDV,
+		After:  afterDV,
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_src.go b/vendor/github.com/hashicorp/terraform/plans/changes_src.go
new file mode 100644
index 0000000000..90153ea7ba
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/changes_src.go
@@ -0,0 +1,190 @@
+package plans
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/states"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange.
+// Pass the associated resource type's schema type to method Decode to
+// obtain a ResourceInstanceChange.
+type ResourceInstanceChangeSrc struct {
+	// Addr is the absolute address of the resource instance that the change
+	// will apply to.
+	Addr addrs.AbsResourceInstance
+
+	// DeposedKey is the identifier for a deposed object associated with the
+	// given instance, or states.NotDeposed if this change applies to the
+	// current object.
+	//
+	// A Replace change for a resource with create_before_destroy set will
+	// create a new DeposedKey temporarily during replacement. In that case,
+	// DeposedKey in the plan is always states.NotDeposed, representing that
+	// the current object is being replaced with the deposed.
+	DeposedKey states.DeposedKey
+
+	// ProviderAddr is the address of the provider configuration that was used
+	// to plan this change, and thus the configuration that must also be
+	// used to apply it.
+	ProviderAddr addrs.AbsProviderConfig
+
+	// ChangeSrc is an embedded description of the not-yet-decoded change.
+	ChangeSrc
+
+	// RequiredReplace is a set of paths that caused the change action to be
+	// Replace rather than Update. Always nil if the change action is not
+	// Replace.
+	//
+	// This is retained only for UI-plan-rendering purposes and so it does not
+	// currently survive a round-trip through a saved plan file.
+ RequiredReplace cty.PathSet + + // Private allows a provider to stash any extra data that is opaque to + // Terraform that relates to this change. Terraform will save this + // byte-for-byte and return it to the provider in the apply call. + Private []byte +} + +// Decode unmarshals the raw representation of the instance object being +// changed. Pass the implied type of the corresponding resource type schema +// for correct operation. +func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) { + change, err := rcs.ChangeSrc.Decode(ty) + if err != nil { + return nil, err + } + return &ResourceInstanceChange{ + Addr: rcs.Addr, + DeposedKey: rcs.DeposedKey, + ProviderAddr: rcs.ProviderAddr, + Change: *change, + RequiredReplace: rcs.RequiredReplace, + Private: rcs.Private, + }, nil +} + +// DeepCopy creates a copy of the receiver where any pointers to nested mutable +// values are also copied, thus ensuring that future mutations of the receiver +// will not affect the copy. +// +// Some types used within a resource change are immutable by convention even +// though the Go language allows them to be mutated, such as the types from +// the addrs package. These are _not_ copied by this method, under the +// assumption that callers will behave themselves. +func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc { + if rcs == nil { + return nil + } + ret := *rcs + + ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...) + + if len(ret.Private) != 0 { + private := make([]byte, len(ret.Private)) + copy(private, ret.Private) + ret.Private = private + } + + ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() + ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() + + return &ret +} + +// OutputChangeSrc describes a change to an output value. +type OutputChangeSrc struct { + // Addr is the absolute address of the output value that the change + // will apply to. + Addr addrs.AbsOutputValue + + // ChangeSrc is an embedded description of the not-yet-decoded change. + // + // For output value changes, the type constraint for the DynamicValue + // instances is always cty.DynamicPseudoType. + ChangeSrc + + // Sensitive, if true, indicates that either the old or new value in the + // change is sensitive and so a rendered version of the plan in the UI + // should elide the actual values while still indicating the action of the + // change. + Sensitive bool +} + +// Decode unmarshals the raw representation of the output value being +// changed. +func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) { + change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType) + if err != nil { + return nil, err + } + return &OutputChange{ + Addr: ocs.Addr, + Change: *change, + Sensitive: ocs.Sensitive, + }, nil +} + +// DeepCopy creates a copy of the receiver where any pointers to nested mutable +// values are also copied, thus ensuring that future mutations of the receiver +// will not affect the copy. +// +// Some types used within a resource change are immutable by convention even +// though the Go language allows them to be mutated, such as the types from +// the addrs package. These are _not_ copied by this method, under the +// assumption that callers will behave themselves. +func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc { + if ocs == nil { + return nil + } + ret := *ocs + + ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() + ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() + + return &ret +} + +// ChangeSrc is a not-yet-decoded Change. 
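+// Call Decode with the same type constraint that was used for encoding to
+// recover the corresponding Change.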
+type ChangeSrc struct { + // Action defines what kind of change is being made. + Action Action + + // Before and After correspond to the fields of the same name in Change, + // but have not yet been decoded from the serialized value used for + // storage. + Before, After DynamicValue +} + +// Decode unmarshals the raw representations of the before and after values +// to produce a Change object. Pass the type constraint that the result must +// conform to. +// +// Where a ChangeSrc is embedded in some other struct, it's generally better +// to call the corresponding Decode method of that struct rather than working +// directly with its embedded Change. +func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) { + var err error + before := cty.NullVal(ty) + after := cty.NullVal(ty) + + if len(cs.Before) > 0 { + before, err = cs.Before.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding 'before' value: %s", err) + } + } + if len(cs.After) > 0 { + after, err = cs.After.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding 'after' value: %s", err) + } + } + return &Change{ + Action: cs.Action, + Before: before, + After: after, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_state.go b/vendor/github.com/hashicorp/terraform/plans/changes_state.go new file mode 100644 index 0000000000..543e6c2bd8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/changes_state.go @@ -0,0 +1,15 @@ +package plans + +import ( + "github.com/hashicorp/terraform/states" +) + +// PlannedState merges the set of changes described by the receiver into the +// given prior state to produce the planned result state. +// +// The result is an approximation of the state as it would exist after +// applying these changes, omitting any values that cannot be determined until +// the changes are actually applied. +func (c *Changes) PlannedState(prior *states.State) (*states.State, error) { + panic("Changes.PlannedState not yet implemented") +} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_sync.go b/vendor/github.com/hashicorp/terraform/plans/changes_sync.go new file mode 100644 index 0000000000..6b4ff98fff --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/changes_sync.go @@ -0,0 +1,144 @@ +package plans + +import ( + "fmt" + "sync" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" +) + +// ChangesSync is a wrapper around a Changes that provides a concurrency-safe +// interface to insert new changes and retrieve copies of existing changes. +// +// Each ChangesSync is independent of all others, so all concurrent writers +// to a particular Changes must share a single ChangesSync. Behavior is +// undefined if any other caller makes changes to the underlying Changes +// object or its nested objects concurrently with any of the methods of a +// particular ChangesSync. +type ChangesSync struct { + lock sync.Mutex + changes *Changes +} + +// AppendResourceInstanceChange records the given resource instance change in +// the set of planned resource changes. +// +// The caller must ensure that there are no concurrent writes to the given +// change while this method is running, but it is safe to resume mutating +// it after this method returns without affecting the saved change. 
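+//
+// (The change is deep-copied before being appended, which is what makes the
+// later mutation safe.)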
+func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) {
+	if cs == nil {
+		panic("AppendResourceInstanceChange on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	s := changeSrc.DeepCopy()
+	cs.changes.Resources = append(cs.changes.Resources, s)
+}
+
+// GetResourceInstanceChange searches the set of resource instance changes for
+// one matching the given address and generation, returning it if it exists.
+//
+// If no such change exists, nil is returned.
+//
+// The returned object is a deep copy of the change recorded in the plan, so
+// callers may mutate it although it's generally better (less confusing) to
+// treat planned changes as immutable after they've been initially constructed.
+func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc {
+	if cs == nil {
+		panic("GetResourceInstanceChange on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	if gen == states.CurrentGen {
+		return cs.changes.ResourceInstance(addr).DeepCopy()
+	}
+	if dk, ok := gen.(states.DeposedKey); ok {
+		return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy()
+	}
+	panic(fmt.Sprintf("unsupported generation value %#v", gen))
+}
+
+// RemoveResourceInstanceChange searches the set of resource instance changes
+// for one matching the given address and generation, and removes it from the
+// set if it exists.
+func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) {
+	if cs == nil {
+		panic("RemoveResourceInstanceChange on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	dk := states.NotDeposed
+	if realDK, ok := gen.(states.DeposedKey); ok {
+		dk = realDK
+	}
+
+	addrStr := addr.String()
+	for i, r := range cs.changes.Resources {
+		if r.Addr.String() != addrStr || r.DeposedKey != dk {
+			continue
+		}
+		copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:])
+		cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1]
+		return
+	}
+}
+
+// AppendOutputChange records the given output value change in the set of
+// planned value changes.
+//
+// The caller must ensure that there are no concurrent writes to the given
+// change while this method is running, but it is safe to resume mutating
+// it after this method returns without affecting the saved change.
+func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) {
+	if cs == nil {
+		panic("AppendOutputChange on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	s := changeSrc.DeepCopy()
+	cs.changes.Outputs = append(cs.changes.Outputs, s)
+}
+
+// GetOutputChange searches the set of output value changes for one matching
+// the given address, returning it if it exists.
+//
+// If no such change exists, nil is returned.
+//
+// The returned object is a deep copy of the change recorded in the plan, so
+// callers may mutate it although it's generally better (less confusing) to
+// treat planned changes as immutable after they've been initially constructed.
+func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc {
+	if cs == nil {
+		panic("GetOutputChange on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	// DeepCopy here to honor the documented contract above; it is nil-safe,
+	// so a missing change still yields nil.
+	return cs.changes.OutputValue(addr).DeepCopy()
+}
+
+// RemoveOutputChange searches the set of output value changes for one matching
+// the given address, and removes it from the set if it exists.
+func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) {
+	if cs == nil {
+		panic("RemoveOutputChange on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	addrStr := addr.String()
+	for i, o := range cs.changes.Outputs {
+		if o.Addr.String() != addrStr {
+			continue
+		}
+		copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:])
+		cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1]
+		return
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/doc.go b/vendor/github.com/hashicorp/terraform/plans/doc.go
new file mode 100644
index 0000000000..01ca389238
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/doc.go
@@ -0,0 +1,5 @@
+// Package plans contains the types that are used to represent Terraform plans.
+//
+// A plan describes a set of changes that Terraform will make to update remote
+// objects to match with changes to the configuration.
+package plans
diff --git a/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go b/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go
new file mode 100644
index 0000000000..51fbb24cfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go
@@ -0,0 +1,96 @@
+package plans
+
+import (
+	"github.com/zclconf/go-cty/cty"
+	ctymsgpack "github.com/zclconf/go-cty/cty/msgpack"
+)
+
+// DynamicValue is the representation in the plan of a value whose type cannot
+// be determined at compile time, such as because it comes from a schema
+// defined in a plugin.
+//
+// This type is used as an indirection so that the overall plan structure can
+// be decoded without schema available, and then the dynamic values accessed
+// at a later time once the appropriate schema has been determined.
+//
+// Internally, DynamicValue is a serialized version of a cty.Value created
+// against a particular type constraint. Callers should not access directly
+// the serialized form, whose format may change in future. Values of this
+// type must always be created by calling NewDynamicValue.
+//
+// The zero value of DynamicValue is nil, and represents the absence of a
+// value within the Go type system. This is distinct from a cty.NullVal
+// result, which represents the absence of a value within the cty type system.
+type DynamicValue []byte
+
+// NewDynamicValue creates a DynamicValue by serializing the given value
+// against the given type constraint. The value must conform to the type
+// constraint, or the result is undefined.
+//
+// If the value to be encoded has no predefined schema (for example, for
+// module output values and input variables), set the type constraint to
+// cty.DynamicPseudoType in order to save type information as part of the
+// value, and then also pass cty.DynamicPseudoType to method Decode to recover
+// the original value.
+//
+// cty.NilVal can be used to represent the absence of a value, but callers
+// must be careful to distinguish values that are absent at the Go layer
+// (cty.NilVal) vs. values that are absent at the cty layer (cty.NullVal
+// results).
+func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) {
+	// If we're given cty.NilVal (the zero value of cty.Value, which is
+	// distinct from a typed null value created by cty.NullVal) then we'll
+	// assume the caller is trying to represent the _absence_ of a value,
+	// and so we'll return a nil DynamicValue.
+	if val == cty.NilVal {
+		return DynamicValue(nil), nil
+	}
+
+	// Currently our internal encoding is msgpack, via ctymsgpack.
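+	// msgpack is preferred over JSON here because it can faithfully
+	// represent cty's unknown values, which commonly appear in plans.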
+	buf, err := ctymsgpack.Marshal(val, ty)
+	if err != nil {
+		return nil, err
+	}
+
+	return DynamicValue(buf), nil
+}
+
+// Decode retrieves the effective value from the receiver by interpreting the
+// serialized form against the given type constraint. For correct results,
+// the type constraint must match (or be consistent with) the one that was
+// used to create the receiver.
+//
+// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and
+// instead represents the absence of a value.
+func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) {
+	if v == nil {
+		return cty.NilVal, nil
+	}
+
+	return ctymsgpack.Unmarshal([]byte(v), ty)
+}
+
+// ImpliedType returns the type implied by the serialized structure of the
+// receiving value.
+//
+// This will not necessarily be exactly the type that was given when the
+// value was encoded, and in particular must not be used for values that
+// were encoded with their static type given as cty.DynamicPseudoType.
+// It is however safe to use this method for values that were encoded using
+// their runtime type as the conforming type, with the result being
+// semantically equivalent but with all lists and sets represented as tuples,
+// and maps as objects, due to ambiguities of the serialization.
+func (v DynamicValue) ImpliedType() (cty.Type, error) {
+	return ctymsgpack.ImpliedType([]byte(v))
+}
+
+// Copy produces a copy of the receiver with a distinct backing array.
+func (v DynamicValue) Copy() DynamicValue {
+	if v == nil {
+		return nil
+	}
+
+	ret := make(DynamicValue, len(v))
+	copy(ret, v)
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go b/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go
new file mode 100644
index 0000000000..18a7e99a37
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go
@@ -0,0 +1,18 @@
+package objchange
+
+import (
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// AllAttributesNull constructs a non-null cty.Value of the object type implied
+// by the given schema that has all of its leaf attributes set to null and all
+// of its nested block collections set to zero-length.
+//
+// This simulates what would result from decoding an empty configuration block
+// with the given schema, except that it does not produce errors.
+func AllAttributesNull(schema *configschema.Block) cty.Value {
+	// "All attributes null" happens to be the definition of EmptyValue for
+	// a Block, so we can just delegate to that.
+	return schema.EmptyValue()
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
new file mode 100644
index 0000000000..58e943042c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
@@ -0,0 +1,376 @@
+package objchange
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+
+	"github.com/hashicorp/terraform/configs/configschema"
+)
+
+// AssertObjectCompatible checks whether the given "actual" value is a valid
+// completion of the possibly-partially-unknown "planned" value.
+//
+// This means that any known leaf value in "planned" must be equal to the
+// corresponding value in "actual", and various other similar constraints.
+//
+// Any inconsistencies are reported by returning a non-zero number of errors.
+// These errors are usually (but not necessarily) cty.PathError values +// referring to a particular nested value within the "actual" value. +// +// The two values must have types that conform to the given schema's implied +// type, or this function will panic. +func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error { + return assertObjectCompatible(schema, planned, actual, nil) +} + +func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error { + var errs []error + if planned.IsNull() && !actual.IsNull() { + errs = append(errs, path.NewErrorf("was absent, but now present")) + return errs + } + if actual.IsNull() && !planned.IsNull() { + errs = append(errs, path.NewErrorf("was present, but now absent")) + return errs + } + if planned.IsNull() { + // No further checks possible if both values are null + return errs + } + + for name, attrS := range schema.Attributes { + plannedV := planned.GetAttr(name) + actualV := actual.GetAttr(name) + + path := append(path, cty.GetAttrStep{Name: name}) + moreErrs := assertValueCompatible(plannedV, actualV, path) + if attrS.Sensitive { + if len(moreErrs) > 0 { + // Use a vague placeholder message instead, to avoid disclosing + // sensitive information. + errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute")) + } + } else { + errs = append(errs, moreErrs...) + } + } + for name, blockS := range schema.BlockTypes { + plannedV := planned.GetAttr(name) + actualV := actual.GetAttr(name) + + // As a special case, we permit a "planned" block with exactly one + // element where all of the "leaf" values are unknown, since that's + // what HCL's dynamic block extension generates if the for_each + // expression is itself unknown and thus it cannot predict how many + // child blocks will get created. + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + if allLeafValuesUnknown(plannedV) && !plannedV.IsNull() { + return errs + } + case configschema.NestingList, configschema.NestingMap, configschema.NestingSet: + if plannedV.IsKnown() && !plannedV.IsNull() && plannedV.LengthInt() == 1 { + elemVs := plannedV.AsValueSlice() + if allLeafValuesUnknown(elemVs[0]) { + return errs + } + } + default: + panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) + } + + path := append(path, cty.GetAttrStep{Name: name}) + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path) + errs = append(errs, moreErrs...) + case configschema.NestingList: + // A NestingList might either be a list or a tuple, depending on + // whether there are dynamically-typed attributes inside. However, + // both support a similar-enough API that we can treat them the + // same for our purposes here. + if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { + continue + } + + plannedL := plannedV.LengthInt() + actualL := actualV.LengthInt() + if plannedL != actualL { + errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + if !actualV.HasIndex(idx).True() { + continue + } + actualEV := actualV.Index(idx) + moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) + errs = append(errs, moreErrs...) 
+			}
+		case configschema.NestingMap:
+			// A NestingMap might either be a map or an object, depending on
+			// whether there are dynamically-typed attributes inside, but
+			// that's decided statically and so both values will have the same
+			// kind.
+			if plannedV.Type().IsObjectType() {
+				plannedAtys := plannedV.Type().AttributeTypes()
+				actualAtys := actualV.Type().AttributeTypes()
+				for k := range plannedAtys {
+					if _, ok := actualAtys[k]; !ok {
+						errs = append(errs, path.NewErrorf("block key %q has vanished", k))
+						continue
+					}
+
+					plannedEV := plannedV.GetAttr(k)
+					actualEV := actualV.GetAttr(k)
+					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k}))
+					errs = append(errs, moreErrs...)
+				}
+				for k := range actualAtys {
+					if _, ok := plannedAtys[k]; !ok {
+						errs = append(errs, path.NewErrorf("new block key %q has appeared", k))
+						continue
+					}
+				}
+			} else {
+				if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
+					continue
+				}
+				plannedL := plannedV.LengthInt()
+				actualL := actualV.LengthInt()
+				if plannedL != actualL {
+					errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
+					continue
+				}
+				for it := plannedV.ElementIterator(); it.Next(); {
+					idx, plannedEV := it.Element()
+					if !actualV.HasIndex(idx).True() {
+						continue
+					}
+					actualEV := actualV.Index(idx)
+					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
+					errs = append(errs, moreErrs...)
+				}
+			}
+		case configschema.NestingSet:
+			if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
+				continue
+			}
+
+			setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool {
+				errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV}))
+				return len(errs) == 0
+			})
+			errs = append(errs, setErrs...)
+
+			// There can be fewer elements in a set after its elements are all
+			// known (values that turn out to be equal will coalesce) but the
+			// number of elements must never get larger.
+			plannedL := plannedV.LengthInt()
+			actualL := actualV.LengthInt()
+			if plannedL < actualL {
+				errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL))
+			}
+		default:
+			panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
+		}
+	}
+	return errs
+}
+
+func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error {
+	// NOTE: We don't normally use the GoString rendering of cty.Value in
+	// user-facing error messages as a rule, but we make an exception
+	// for this function because we expect the user to pass this message on
+	// verbatim to the provider development team and so more detail is better.
+
+	var errs []error
+	if planned.Type() == cty.DynamicPseudoType {
+		// Anything goes, then
+		return errs
+	}
+	if problems := planned.Type().TestConformance(actual.Type()); len(problems) > 0 {
+		errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type())))
+		// If the types don't match then we can't do any other comparisons,
+		// so we bail early.
+		return errs
+	}
+
+	if !planned.IsKnown() {
+		// We didn't know what we were going to end up with during plan, so
+		// anything goes during apply.
+ return errs + } + + if actual.IsNull() { + if planned.IsNull() { + return nil + } + errs = append(errs, path.NewErrorf("was %#v, but now null", planned)) + return errs + } + if planned.IsNull() { + errs = append(errs, path.NewErrorf("was null, but now %#v", actual)) + return errs + } + + ty := planned.Type() + switch { + + case !actual.IsKnown(): + errs = append(errs, path.NewErrorf("was known, but now unknown")) + + case ty.IsPrimitiveType(): + if !actual.Equals(planned).True() { + errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual)) + } + + case ty.IsListType() || ty.IsMapType() || ty.IsTupleType(): + for it := planned.ElementIterator(); it.Next(); { + k, plannedV := it.Element() + if !actual.HasIndex(k).True() { + errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k))) + continue + } + + actualV := actual.Index(k) + moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k})) + errs = append(errs, moreErrs...) + } + + for it := actual.ElementIterator(); it.Next(); { + k, _ := it.Element() + if !planned.HasIndex(k).True() { + errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k))) + } + } + + case ty.IsObjectType(): + atys := ty.AttributeTypes() + for name := range atys { + // Because we already tested that the two values have the same type, + // we can assume that the same attributes are present in both and + // focus just on testing their values. + plannedV := planned.GetAttr(name) + actualV := actual.GetAttr(name) + moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name})) + errs = append(errs, moreErrs...) + } + + case ty.IsSetType(): + // We can't really do anything useful for sets here because changing + // an unknown element to known changes the identity of the element, and + // so we can't correlate them properly. However, we will at least check + // to ensure that the number of elements is consistent, along with + // the general type-match checks we ran earlier in this function. + if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() { + + setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool { + errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV})) + return len(errs) == 0 + }) + errs = append(errs, setErrs...) + + // There can be fewer elements in a set after its elements are all + // known (values that turn out to be equal will coalesce) but the + // number of elements must never get larger. + + plannedL := planned.LengthInt() + actualL := actual.LengthInt() + if plannedL < actualL { + errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL)) + } + } + } + + return errs +} + +func indexStrForErrors(v cty.Value) string { + switch v.Type() { + case cty.Number: + return v.AsBigFloat().Text('f', -1) + case cty.String: + return strconv.Quote(v.AsString()) + default: + // Should be impossible, since no other index types are allowed! 
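+		// Fall back to the GoString rendering so the message is still useful.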
+		return fmt.Sprintf("%#v", v)
+	}
+}
+
+func allLeafValuesUnknown(v cty.Value) bool {
+	seenKnownValue := false
+	cty.Walk(v, func(path cty.Path, cv cty.Value) (bool, error) {
+		if cv.IsNull() {
+			seenKnownValue = true
+		}
+		if cv.Type().IsPrimitiveType() && cv.IsKnown() {
+			seenKnownValue = true
+		}
+		return true, nil
+	})
+	return !seenKnownValue
+}
+
+// assertSetValuesCompatible checks that each of the elements in a can
+// be correlated with at least one equivalent element in b and vice-versa,
+// using the given correlation function.
+//
+// This allows the number of elements in the sets to change as long as all
+// elements in both sets can be correlated, making this function safe to use
+// with sets that may contain unknown values as long as the unknown case is
+// addressed in some reasonable way in the callback function.
+//
+// The callback always receives values from set a as its first argument and
+// values from set b in its second argument, so it is safe to use with
+// non-commutative functions.
+//
+// As with assertValueCompatible, we assume that the target audience of error
+// messages here is a provider developer (via a bug report from a user) and so
+// we intentionally violate our usual rule of keeping cty implementation
+// details out of error messages.
+func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error {
+	a := planned
+	b := actual
+
+	// Our methodology here is a little tricky, to deal with the fact that
+	// it's impossible to directly correlate two non-equal set elements because
+	// they don't have identities separate from their values.
+	// The approach is to count the number of equivalent elements each element
+	// of a has in b and vice-versa, and then consider the sets compatible
+	// only if each element in both sets has at least one equivalent.
+	as := a.AsValueSlice()
+	bs := b.AsValueSlice()
+	aeqs := make([]bool, len(as))
+	beqs := make([]bool, len(bs))
+	for ai, av := range as {
+		for bi, bv := range bs {
+			if f(av, bv) {
+				aeqs[ai] = true
+				beqs[bi] = true
+			}
+		}
+	}
+
+	var errs []error
+	for i, eq := range aeqs {
+		if !eq {
+			errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i]))
+		}
+	}
+	if len(errs) > 0 {
+		// Exit early since otherwise we're likely to generate duplicate
+		// error messages from the other perspective in the subsequent loop.
+		return errs
+	}
+	for i, eq := range beqs {
+		if !eq {
+			errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i]))
+		}
+	}
+	return errs
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go b/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go
new file mode 100644
index 0000000000..2c18a0108f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go
@@ -0,0 +1,11 @@
+// Package objchange deals with the business logic of taking a prior state
+// value and a config value and producing a proposed new merged value, along
+// with other related rules in this domain.
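+//
+// For example (a sketch, assuming schema, prior, and config values that
+// conform to that same schema):
+//
+//	proposed := objchange.ProposedNewObject(schema, prior, config)
+//
+// computes the value that would be shown as the planned new state.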
+package objchange
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go b/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go
new file mode 100644
index 0000000000..cbfefddddb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go
@@ -0,0 +1,104 @@
+package objchange
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// LongestCommonSubsequence finds a sequence of values that are common to both
+// xs and ys, with the same relative ordering as in both collections. This
+// result is useful as a first step towards computing a diff showing
+// added/removed elements in a sequence.
+//
+// The approach used here is a "naive" one, assuming that both xs and ys will
+// generally be small in most reasonable Terraform configurations. For larger
+// lists the time/space usage may be sub-optimal.
+//
+// A pair of lists may have multiple longest common subsequences. In that
+// case, the one selected by this function is undefined.
+func LongestCommonSubsequence(xs, ys []cty.Value) []cty.Value {
+	if len(xs) == 0 || len(ys) == 0 {
+		return make([]cty.Value, 0)
+	}
+
+	c := make([]int, len(xs)*len(ys))
+	eqs := make([]bool, len(xs)*len(ys))
+	w := len(xs)
+
+	for y := 0; y < len(ys); y++ {
+		for x := 0; x < len(xs); x++ {
+			eqV := xs[x].Equals(ys[y])
+			eq := false
+			if eqV.IsKnown() && eqV.True() {
+				eq = true
+				eqs[(w*y)+x] = true // equality tests can be expensive, so cache it
+			}
+			if eq {
+				// Sequence gets one longer than for the cell at top left,
+				// since we'd append a new item to the sequence here.
+				if x == 0 || y == 0 {
+					c[(w*y)+x] = 1
+				} else {
+					c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1
+				}
+			} else {
+				// We follow the longest of the sequence above and the sequence
+				// to the left of us in the matrix.
+				l := 0
+				u := 0
+				if x > 0 {
+					l = c[(w*y)+(x-1)]
+				}
+				if y > 0 {
+					u = c[(w*(y-1))+x]
+				}
+				if l > u {
+					c[(w*y)+x] = l
+				} else {
+					c[(w*y)+x] = u
+				}
+			}
+		}
+	}
+
+	// The bottom right cell tells us how long our longest sequence will be
+	seq := make([]cty.Value, c[len(c)-1])
+
+	// Now we will walk back from the bottom right cell, finding again all
+	// of the equal pairs to construct our sequence.
+	x := len(xs) - 1
+	y := len(ys) - 1
+	i := len(seq) - 1
+
+	for x > -1 && y > -1 {
+		if eqs[(w*y)+x] {
+			// Add the value to our result list and then walk diagonally
+			// up and to the left.
+			seq[i] = xs[x]
+			x--
+			y--
+			i--
+		} else {
+			// Take the path with the greatest sequence length in the matrix.
+			l := 0
+			u := 0
+			if x > 0 {
+				l = c[(w*y)+(x-1)]
+			}
+			if y > 0 {
+				u = c[(w*(y-1))+x]
+			}
+			if l > u {
+				x--
+			} else {
+				y--
+			}
+		}
+	}
+
+	if i > -1 {
+		// should never happen if the matrix was constructed properly
+		panic("not enough elements in sequence")
+	}
+
+	return seq
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go b/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go
new file mode 100644
index 0000000000..c23f44dac6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go
@@ -0,0 +1,132 @@
+package objchange
+
+import (
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// NormalizeObjectFromLegacySDK takes an object that may have been generated
+// by the legacy Terraform SDK (i.e. returned from a provider with the
+// LegacyTypeSystem opt-out set) and does its best to normalize it for the
+// assumptions we would normally enforce if the provider had not opted out.
+// +// In particular, this function guarantees that a value representing a nested +// block will never itself be unknown or null, instead representing that as +// a non-null value that may contain null/unknown values. +// +// The input value must still conform to the implied type of the given schema, +// or else this function may produce garbage results or panic. This is usually +// okay because type consistency is enforced when deserializing the value +// returned from the provider over the RPC wire protocol anyway. +func NormalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty.Value { + if val == cty.NilVal || val.IsNull() { + // This should never happen in reasonable use, but we'll allow it + // and normalize to a null of the expected type rather than panicking + // below. + return cty.NullVal(schema.ImpliedType()) + } + + vals := make(map[string]cty.Value) + for name := range schema.Attributes { + // No normalization for attributes, since them being type-conformant + // is all that we require. + vals[name] = val.GetAttr(name) + } + for name, blockS := range schema.BlockTypes { + lv := val.GetAttr(name) + + // Legacy SDK never generates dynamically-typed attributes and so our + // normalization code doesn't deal with them, but we need to make sure + // we still pass them through properly so that we don't interfere with + // objects generated by other SDKs. + if ty := blockS.Block.ImpliedType(); ty.HasDynamicTypes() { + vals[name] = lv + continue + } + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + if lv.IsKnown() { + if lv.IsNull() && blockS.Nesting == configschema.NestingGroup { + vals[name] = blockS.EmptyValue() + } else { + vals[name] = NormalizeObjectFromLegacySDK(lv, &blockS.Block) + } + } else { + vals[name] = unknownBlockStub(&blockS.Block) + } + case configschema.NestingList: + switch { + case !lv.IsKnown(): + vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case lv.IsNull() || lv.LengthInt() == 0: + vals[name] = cty.ListValEmpty(blockS.Block.ImpliedType()) + default: + subVals := make([]cty.Value, 0, lv.LengthInt()) + for it := lv.ElementIterator(); it.Next(); { + _, subVal := it.Element() + subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block)) + } + vals[name] = cty.ListVal(subVals) + } + case configschema.NestingSet: + switch { + case !lv.IsKnown(): + vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case lv.IsNull() || lv.LengthInt() == 0: + vals[name] = cty.SetValEmpty(blockS.Block.ImpliedType()) + default: + subVals := make([]cty.Value, 0, lv.LengthInt()) + for it := lv.ElementIterator(); it.Next(); { + _, subVal := it.Element() + subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block)) + } + vals[name] = cty.SetVal(subVals) + } + default: + // The legacy SDK doesn't support NestingMap, so we just assume + // maps are always okay. (If not, we would've detected and returned + // an error to the user before we got here.) + vals[name] = lv + } + } + return cty.ObjectVal(vals) +} + +// unknownBlockStub constructs an object value that approximates an unknown +// block by producing a known block object with all of its leaf attribute +// values set to unknown. +// +// Blocks themselves cannot be unknown, so if the legacy SDK tries to return +// such a thing, we'll use this result instead. 
+// This convention mimics how the dynamic block feature deals with being
+// asked to iterate over an unknown value, because our value-checking
+// functions already accept this convention as a special case.
+func unknownBlockStub(schema *configschema.Block) cty.Value {
+	vals := make(map[string]cty.Value)
+	for name, attrS := range schema.Attributes {
+		vals[name] = cty.UnknownVal(attrS.Type)
+	}
+	for name, blockS := range schema.BlockTypes {
+		switch blockS.Nesting {
+		case configschema.NestingSingle, configschema.NestingGroup:
+			vals[name] = unknownBlockStub(&blockS.Block)
+		case configschema.NestingList:
+			// In principle we may be expected to produce a tuple value here,
+			// if there are any dynamically-typed attributes in our nested block,
+			// but the legacy SDK doesn't support that, so we just assume it'll
+			// never be necessary to normalize those. (Incorrect usage in any
+			// other SDK would be caught and returned as an error before we
+			// get here.)
+			vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
+		case configschema.NestingSet:
+			vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
+		case configschema.NestingMap:
+			// A nesting map can never be unknown since we then wouldn't know
+			// what the keys are. (Legacy SDK doesn't support NestingMap anyway,
+			// so this should never arise.)
+			vals[name] = cty.MapValEmpty(blockS.Block.ImpliedType())
+		}
+	}
+	return cty.ObjectVal(vals)
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go b/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go
new file mode 100644
index 0000000000..5a8af1481f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go
@@ -0,0 +1,397 @@
+package objchange
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/configs/configschema"
+)
+
+// ProposedNewObject constructs a proposed new object value by combining the
+// computed attribute values from "prior" with the configured attribute values
+// from "config".
+//
+// Both values must conform to the given schema's implied type, or this
+// function will panic.
+//
+// The prior value must be wholly known, but the config value may be unknown
+// or have nested unknown values.
+//
+// The merging of the two objects includes the attributes of any nested blocks,
+// which will be correlated in a manner appropriate for their nesting mode.
+// Note in particular that the correlation for blocks backed by sets is a
+// heuristic based on matching non-computed attribute values and so it may
+// produce strange results with more "extreme" cases, such as a nested set
+// block where _all_ attributes are computed.
+func ProposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
+	// If the config and prior are both null, return early here before
+	// populating the prior block. This prevents non-null blocks from
+	// appearing in the proposed state value.
+	if config.IsNull() && prior.IsNull() {
+		return prior
+	}
+
+	if prior.IsNull() {
+		// In this case, we will construct a synthetic prior value that is
+		// similar to the result of decoding an empty configuration block,
+		// which simplifies our handling of the top-level attributes/blocks
+		// below by giving us one non-null level of object to pull values from.
+ prior = AllAttributesNull(schema) + } + return proposedNewObject(schema, prior, config) +} + +// PlannedDataResourceObject is similar to ProposedNewObject but tailored for +// planning data resources in particular. Specifically, it replaces the values +// of any Computed attributes not set in the configuration with an unknown +// value, which serves as a placeholder for a value to be filled in by the +// provider when the data resource is finally read. +// +// Data resources are different because the planning of them is handled +// entirely within Terraform Core and not subject to customization by the +// provider. This function is, in effect, producing an equivalent result to +// passing the ProposedNewObject result into a provider's PlanResourceChange +// function, assuming a fixed implementation of PlanResourceChange that just +// fills in unknown values as needed. +func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value { + // Our trick here is to run the ProposedNewObject logic with an + // entirely-unknown prior value. Because of cty's unknown short-circuit + // behavior, any operation on prior returns another unknown, and so + // unknown values propagate into all of the parts of the resulting value + // that would normally be filled in by preserving the prior state. + prior := cty.UnknownVal(schema.ImpliedType()) + return proposedNewObject(schema, prior, config) +} + +func proposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value { + if config.IsNull() || !config.IsKnown() { + // This is a weird situation, but we'll allow it anyway to free + // callers from needing to specifically check for these cases. + return prior + } + if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) { + panic("ProposedNewObject only supports object-typed values") + } + + // From this point onwards, we can assume that both values are non-null + // object types, and that the config value itself is known (though it + // may contain nested values that are unknown.) + + newAttrs := map[string]cty.Value{} + for name, attr := range schema.Attributes { + priorV := prior.GetAttr(name) + configV := config.GetAttr(name) + var newV cty.Value + switch { + case attr.Computed && attr.Optional: + // This is the trickiest scenario: we want to keep the prior value + // if the config isn't overriding it. Note that due to some + // ambiguity here, setting an optional+computed attribute from + // config and then later switching the config to null in a + // subsequent change causes the initial config value to be "sticky" + // unless the provider specifically overrides it during its own + // plan customization step. + if configV.IsNull() { + newV = priorV + } else { + newV = configV + } + case attr.Computed: + // configV will always be null in this case, by definition. + // priorV may also be null, but that's okay. + newV = priorV + default: + // For non-computed attributes, we always take the config value, + // even if it is null. If it's _required_ then null values + // should've been caught during an earlier validation step, and + // so we don't really care about that here. + newV = configV + } + newAttrs[name] = newV + } + + // Merging nested blocks is a little more complex, since we need to + // correlate blocks between both objects and then recursively propose + // a new object for each. The correlation logic depends on the nesting + // mode for each block type. 
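+	// (Lists correlate elements by index, maps by key, and sets by comparing
+	// elements with their computed attribute values nulled out; see
+	// setElementCompareValues below.)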
+ for name, blockType := range schema.BlockTypes { + priorV := prior.GetAttr(name) + configV := config.GetAttr(name) + var newV cty.Value + switch blockType.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + newV = ProposedNewObject(&blockType.Block, priorV, configV) + + case configschema.NestingList: + // Nested blocks are correlated by index. + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + newVals := make([]cty.Value, 0, configVLen) + for it := configV.ElementIterator(); it.Next(); { + idx, configEV := it.Element() + if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { + // If there is no corresponding prior element then + // we just take the config value as-is. + newVals = append(newVals, configEV) + continue + } + priorEV := priorV.Index(idx) + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals = append(newVals, newEV) + } + // Despite the name, a NestingList might also be a tuple, if + // its nested schema contains dynamically-typed attributes. + if configV.Type().IsTupleType() { + newV = cty.TupleVal(newVals) + } else { + newV = cty.ListVal(newVals) + } + } else { + // Despite the name, a NestingList might also be a tuple, if + // its nested schema contains dynamically-typed attributes. + if configV.Type().IsTupleType() { + newV = cty.EmptyTupleVal + } else { + newV = cty.ListValEmpty(blockType.ImpliedType()) + } + } + + case configschema.NestingMap: + // Despite the name, a NestingMap may produce either a map or + // object value, depending on whether the nested schema contains + // dynamically-typed attributes. + if configV.Type().IsObjectType() { + // Nested blocks are correlated by key. + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + newVals := make(map[string]cty.Value, configVLen) + atys := configV.Type().AttributeTypes() + for name := range atys { + configEV := configV.GetAttr(name) + if !priorV.IsKnown() || priorV.IsNull() || !priorV.Type().HasAttribute(name) { + // If there is no corresponding prior element then + // we just take the config value as-is. + newVals[name] = configEV + continue + } + priorEV := priorV.GetAttr(name) + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals[name] = newEV + } + // Although we call the nesting mode "map", we actually use + // object values so that elements might have different types + // in case of dynamically-typed attributes. + newV = cty.ObjectVal(newVals) + } else { + newV = cty.EmptyObjectVal + } + } else { + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + newVals := make(map[string]cty.Value, configVLen) + for it := configV.ElementIterator(); it.Next(); { + idx, configEV := it.Element() + k := idx.AsString() + if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { + // If there is no corresponding prior element then + // we just take the config value as-is. 
+ newVals[k] = configEV + continue + } + priorEV := priorV.Index(idx) + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals[k] = newEV + } + newV = cty.MapVal(newVals) + } else { + newV = cty.MapValEmpty(blockType.ImpliedType()) + } + } + + case configschema.NestingSet: + if !configV.Type().IsSetType() { + panic("configschema.NestingSet value is not a set as expected") + } + + // Nested blocks are correlated by comparing the element values + // after eliminating all of the computed attributes. In practice, + // this means that any config change produces an entirely new + // nested object, and we only propagate prior computed values + // if the non-computed attribute values are identical. + var cmpVals [][2]cty.Value + if priorV.IsKnown() && !priorV.IsNull() { + cmpVals = setElementCompareValues(&blockType.Block, priorV, false) + } + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + used := make([]bool, len(cmpVals)) // track used elements in case multiple have the same compare value + newVals := make([]cty.Value, 0, configVLen) + for it := configV.ElementIterator(); it.Next(); { + _, configEV := it.Element() + var priorEV cty.Value + for i, cmp := range cmpVals { + if used[i] { + continue + } + if cmp[1].RawEquals(configEV) { + priorEV = cmp[0] + used[i] = true // we can't use this value on a future iteration + break + } + } + if priorEV == cty.NilVal { + priorEV = cty.NullVal(blockType.ImpliedType()) + } + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals = append(newVals, newEV) + } + newV = cty.SetVal(newVals) + } else { + newV = cty.SetValEmpty(blockType.Block.ImpliedType()) + } + + default: + // Should never happen, since the above cases are comprehensive. + panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting)) + } + + newAttrs[name] = newV + } + + return cty.ObjectVal(newAttrs) +} + +// setElementCompareValues takes a known, non-null value of a cty.Set type and +// returns a table -- constructed of two-element arrays -- that maps original +// set element values to corresponding values that have all of the computed +// values removed, making them suitable for comparison with values obtained +// from configuration. The element type of the set must conform to the implied +// type of the given schema, or this function will panic. +// +// In the resulting slice, the zeroth element of each array is the original +// value and the one-indexed element is the corresponding "compare value". +// +// This is intended to help correlate prior elements with configured elements +// in ProposedNewObject. The result is a heuristic rather than an exact science, +// since e.g. two separate elements may reduce to the same value through this +// process. The caller must therefore be ready to deal with duplicates. +func setElementCompareValues(schema *configschema.Block, set cty.Value, isConfig bool) [][2]cty.Value { + ret := make([][2]cty.Value, 0, set.LengthInt()) + for it := set.ElementIterator(); it.Next(); { + _, ev := it.Element() + ret = append(ret, [2]cty.Value{ev, setElementCompareValue(schema, ev, isConfig)}) + } + return ret +} + +// setElementCompareValue creates a new value that has all of the same +// non-computed attribute values as the one given but has all computed +// attribute values forced to null. +// +// If isConfig is true then non-null Optional+Computed attribute values will +// be preserved. Otherwise, they will also be set to null. 
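+//
+// For example, a hypothetical element with a configured attribute name="a"
+// and a computed attribute id="i-123" reduces to name="a", id=null, so it
+// can be matched against a configured element that could not have set id.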
+//
+// The input value must conform to the schema's implied type, and the return
+// value is guaranteed to conform to it.
+func setElementCompareValue(schema *configschema.Block, v cty.Value, isConfig bool) cty.Value {
+	if v.IsNull() || !v.IsKnown() {
+		return v
+	}
+
+	attrs := map[string]cty.Value{}
+	for name, attr := range schema.Attributes {
+		switch {
+		case attr.Computed && attr.Optional:
+			if isConfig {
+				attrs[name] = v.GetAttr(name)
+			} else {
+				attrs[name] = cty.NullVal(attr.Type)
+			}
+		case attr.Computed:
+			attrs[name] = cty.NullVal(attr.Type)
+		default:
+			attrs[name] = v.GetAttr(name)
+		}
+	}
+
+	for name, blockType := range schema.BlockTypes {
+		switch blockType.Nesting {
+
+		case configschema.NestingSingle, configschema.NestingGroup:
+			attrs[name] = setElementCompareValue(&blockType.Block, v.GetAttr(name), isConfig)
+
+		case configschema.NestingList, configschema.NestingSet:
+			cv := v.GetAttr(name)
+			if cv.IsNull() || !cv.IsKnown() {
+				attrs[name] = cv
+				continue
+			}
+			if l := cv.LengthInt(); l > 0 {
+				elems := make([]cty.Value, 0, l)
+				for it := cv.ElementIterator(); it.Next(); {
+					_, ev := it.Element()
+					elems = append(elems, setElementCompareValue(&blockType.Block, ev, isConfig))
+				}
+				if blockType.Nesting == configschema.NestingSet {
+					// SetVal would panic if given elements that are not
+					// all of the same type, but that's guaranteed not to
+					// happen here because our input value was _already_ a
+					// set and we've not changed the types of any elements here.
+					attrs[name] = cty.SetVal(elems)
+				} else {
+					attrs[name] = cty.TupleVal(elems)
+				}
+			} else {
+				if blockType.Nesting == configschema.NestingSet {
+					attrs[name] = cty.SetValEmpty(blockType.Block.ImpliedType())
+				} else {
+					attrs[name] = cty.EmptyTupleVal
+				}
+			}
+
+		case configschema.NestingMap:
+			cv := v.GetAttr(name)
+			if cv.IsNull() || !cv.IsKnown() {
+				attrs[name] = cv
+				continue
+			}
+			elems := make(map[string]cty.Value)
+			for it := cv.ElementIterator(); it.Next(); {
+				kv, ev := it.Element()
+				elems[kv.AsString()] = setElementCompareValue(&blockType.Block, ev, isConfig)
+			}
+			attrs[name] = cty.ObjectVal(elems)
+
+		default:
+			// Should never happen, since the above cases are comprehensive.
+			panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
+		}
+	}
+
+	return cty.ObjectVal(attrs)
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go b/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go
new file mode 100644
index 0000000000..69acb8979a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go
@@ -0,0 +1,269 @@
+package objchange
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/configs/configschema"
+)
+
+// AssertPlanValid checks whether a planned new state returned by a
+// provider's PlanResourceChange method is suitable to achieve a change
+// from priorState to config. It returns a slice with nonzero length if
+// any problems are detected. Because problems here indicate bugs in the
+// provider that generated the plannedState, they are written with provider
+// developers as an audience, rather than end-users.
+//
+// All of the given values must have the same type and must conform to the
+// implied type of the given schema, or this function may panic or produce
+// garbage results.
+//
+// During planning, a provider may only make changes to attributes that are
+// null (unset) in the configuration and are marked as "computed" in the
+// resource type schema, in order to insert any default values the provider
+// may know about. If the default value cannot be determined until apply time,
+// the provider can return an unknown value. Providers are forbidden from
+// planning a change that disagrees with any non-null argument in the
+// configuration.
+//
+// As a special exception, providers _are_ allowed to provide attribute values
+// conflicting with configuration if and only if the planned value exactly
+// matches the corresponding attribute value in the prior state. The provider
+// can use this to signal that the new value is functionally equivalent to
+// the old and thus no change is required.
+func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error {
+	return assertPlanValid(schema, priorState, config, plannedState, nil)
+}
+
+func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error {
+	var errs []error
+	if plannedState.IsNull() && !config.IsNull() {
+		errs = append(errs, path.NewErrorf("planned for absence but config wants existence"))
+		return errs
+	}
+	if config.IsNull() && !plannedState.IsNull() {
+		errs = append(errs, path.NewErrorf("planned for existence but config wants absence"))
+		return errs
+	}
+	if plannedState.IsNull() {
+		// No further checks possible if the planned value is null
+		return errs
+	}
+
+	impTy := schema.ImpliedType()
+
+	for name, attrS := range schema.Attributes {
+		plannedV := plannedState.GetAttr(name)
+		configV := config.GetAttr(name)
+		priorV := cty.NullVal(attrS.Type)
+		if !priorState.IsNull() {
+			priorV = priorState.GetAttr(name)
+		}
+
+		path := append(path, cty.GetAttrStep{Name: name})
+		moreErrs := assertPlannedValueValid(attrS, priorV, configV, plannedV, path)
+		errs = append(errs, moreErrs...)
+	}
+	for name, blockS := range schema.BlockTypes {
+		path := append(path, cty.GetAttrStep{Name: name})
+		plannedV := plannedState.GetAttr(name)
+		configV := config.GetAttr(name)
+		priorV := cty.NullVal(impTy.AttributeType(name))
+		if !priorState.IsNull() {
+			priorV = priorState.GetAttr(name)
+		}
+		if plannedV.RawEquals(configV) {
+			// Easy path: nothing has changed at all
+			continue
+		}
+		if !plannedV.IsKnown() {
+			errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+			continue
+		}
+
+		switch blockS.Nesting {
+		case configschema.NestingSingle, configschema.NestingGroup:
+			moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path)
+			errs = append(errs, moreErrs...)
+		case configschema.NestingList:
+			// A NestingList might either be a list or a tuple, depending on
+			// whether there are dynamically-typed attributes inside. However,
+			// both support a similar-enough API that we can treat them the
+			// same for our purposes here.
+ if plannedV.IsNull() { + errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null")) + continue + } + + plannedL := plannedV.LengthInt() + configL := configV.LengthInt() + if plannedL != configL { + errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + path := append(path, cty.IndexStep{Key: idx}) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + if !configV.HasIndex(idx).True() { + continue // should never happen since we checked the lengths above + } + configEV := configV.Index(idx) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.HasIndex(idx).True() { + priorEV = priorV.Index(idx) + } + + moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) + errs = append(errs, moreErrs...) + } + case configschema.NestingMap: + if plannedV.IsNull() { + errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null")) + continue + } + + // A NestingMap might either be a map or an object, depending on + // whether there are dynamically-typed attributes inside, but + // that's decided statically and so all values will have the same + // kind. + if plannedV.Type().IsObjectType() { + plannedAtys := plannedV.Type().AttributeTypes() + configAtys := configV.Type().AttributeTypes() + for k := range plannedAtys { + if _, ok := configAtys[k]; !ok { + errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) + continue + } + path := append(path, cty.GetAttrStep{Name: k}) + + plannedEV := plannedV.GetAttr(k) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + configEV := configV.GetAttr(k) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.Type().HasAttribute(k) { + priorEV = priorV.GetAttr(k) + } + moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) + errs = append(errs, moreErrs...) 
+				}
+				for k := range configAtys {
+					if _, ok := plannedAtys[k]; !ok {
+						errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k))
+						continue
+					}
+				}
+			} else {
+				plannedL := plannedV.LengthInt()
+				configL := configV.LengthInt()
+				if plannedL != configL {
+					errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL))
+					continue
+				}
+				for it := plannedV.ElementIterator(); it.Next(); {
+					idx, plannedEV := it.Element()
+					path := append(path, cty.IndexStep{Key: idx})
+					if !plannedEV.IsKnown() {
+						errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+						continue
+					}
+					k := idx.AsString()
+					if !configV.HasIndex(idx).True() {
+						errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k))
+						continue
+					}
+					configEV := configV.Index(idx)
+					priorEV := cty.NullVal(blockS.ImpliedType())
+					if !priorV.IsNull() && priorV.HasIndex(idx).True() {
+						priorEV = priorV.Index(idx)
+					}
+					moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
+					errs = append(errs, moreErrs...)
+				}
+				for it := configV.ElementIterator(); it.Next(); {
+					idx, _ := it.Element()
+					if !plannedV.HasIndex(idx).True() {
+						errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString()))
+						continue
+					}
+				}
+			}
+		case configschema.NestingSet:
+			if plannedV.IsNull() {
+				errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null"))
+				continue
+			}
+
+			// Because set elements have no identifier with which to correlate
+			// them, we can't robustly validate the plan for a nested block
+			// backed by a set, and so unfortunately we need to just trust the
+			// provider to do the right thing. :(
+			//
+			// (In principle we could correlate elements by matching the
+			// subset of attributes explicitly set in config, except for the
+			// special diff suppression rule which allows for there to be a
+			// planned value that is constructed by mixing part of a prior
+			// value with part of a config value, creating an entirely new
+			// element that is present in neither prior nor config.)
+			for it := plannedV.ElementIterator(); it.Next(); {
+				idx, plannedEV := it.Element()
+				path := append(path, cty.IndexStep{Key: idx})
+				if !plannedEV.IsKnown() {
+					errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+					continue
+				}
+			}
+
+		default:
+			panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
+		}
+	}
+
+	return errs
+}
+
+func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error {
+	var errs []error
+	if plannedV.RawEquals(configV) {
+		// This is the easy path: provider didn't change anything at all.
+		return errs
+	}
+	if plannedV.RawEquals(priorV) && !priorV.IsNull() {
+		// Also pretty easy: there is a prior value and the provider has
+		// returned it unchanged. This indicates that configV and plannedV
+		// are functionally equivalent and so the provider wishes to disregard
+		// the configuration value in favor of the prior.
+		return errs
+	}
+	if attrS.Computed && configV.IsNull() {
+		// The provider is allowed to change the value of any computed
+		// attribute that isn't explicitly set in the config.
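+		// (This is how providers fill in defaults and server-decided values,
+		// such as resource IDs, for attributes the user left unset.)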
+ return errs + } + + // If none of the above conditions match, the provider has made an invalid + // change to this attribute. + if priorV.IsNull() { + if attrS.Sensitive { + errs = append(errs, path.NewErrorf("sensitive planned value does not match config value")) + } else { + errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV)) + } + return errs + } + if attrS.Sensitive { + errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value")) + } else { + errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV)) + } + return errs +} diff --git a/vendor/github.com/hashicorp/terraform/plans/plan.go b/vendor/github.com/hashicorp/terraform/plans/plan.go new file mode 100644 index 0000000000..5a3e4548ef --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/plan.go @@ -0,0 +1,92 @@ +package plans + +import ( + "sort" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// Plan is the top-level type representing a planned set of changes. +// +// A plan is a summary of the set of changes required to move from a current +// state to a goal state derived from configuration. The described changes +// are not applied directly, but contain an approximation of the final +// result that will be completed during apply by resolving any values that +// cannot be predicted. +// +// A plan must always be accompanied by the state and configuration it was +// built from, since the plan does not itself include all of the information +// required to make the changes indicated. +type Plan struct { + VariableValues map[string]DynamicValue + Changes *Changes + TargetAddrs []addrs.Targetable + ProviderSHA256s map[string][]byte + Backend Backend +} + +// Backend represents the backend-related configuration and other data as it +// existed when a plan was created. +type Backend struct { + // Type is the type of backend that the plan will apply against. + Type string + + // Config is the configuration of the backend, whose schema is decided by + // the backend Type. + Config DynamicValue + + // Workspace is the name of the workspace that was active when the plan + // was created. It is illegal to apply a plan created for one workspace + // to the state of another workspace. + // (This constraint is already enforced by the statefile lineage mechanism, + // but storing this explicitly allows us to return a better error message + // in the situation where the user has the wrong workspace selected.) + Workspace string +} + +func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) { + dv, err := NewDynamicValue(config, configSchema.ImpliedType()) + if err != nil { + return nil, err + } + + return &Backend{ + Type: typeName, + Config: dv, + Workspace: workspaceName, + }, nil +} + +// ProviderAddrs returns a list of all of the provider configuration addresses +// referenced throughout the receiving plan. +// +// The result is de-duplicated so that each distinct address appears only once. 
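+// The result is also sorted into a consistent order, keyed by the string
+// representation of each address, so that it is stable between runs.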
+func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig {
+	if p == nil || p.Changes == nil {
+		return nil
+	}
+
+	m := map[string]addrs.AbsProviderConfig{}
+	for _, rc := range p.Changes.Resources {
+		m[rc.ProviderAddr.String()] = rc.ProviderAddr
+	}
+	if len(m) == 0 {
+		return nil
+	}
+
+	// This is mainly just so we'll get stable results for testing purposes.
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	ret := make([]addrs.AbsProviderConfig, len(keys))
+	for i, key := range keys {
+		ret[i] = m[key]
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/client.go b/vendor/github.com/hashicorp/terraform/plugin/client.go
index 7e2f4fecb2..0eab5385b5 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/client.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/client.go
@@ -19,11 +19,13 @@ func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig {
 	})
 
 	return &plugin.ClientConfig{
-		Cmd:             exec.Command(m.Path),
-		HandshakeConfig: Handshake,
-		Managed:         true,
-		Plugins:         PluginMap,
-		Logger:          logger,
+		Cmd:              exec.Command(m.Path),
+		HandshakeConfig:  Handshake,
+		VersionedPlugins: VersionedPlugins,
+		Managed:          true,
+		Logger:           logger,
+		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
+		AutoMTLS:         true,
 	}
 }
diff --git a/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go
new file mode 100644
index 0000000000..51cb2fe2fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go
@@ -0,0 +1,132 @@
+package convert
+
+import (
+	proto "github.com/hashicorp/terraform/internal/tfplugin5"
+	"github.com/hashicorp/terraform/tfdiags"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// WarnsAndErrsToProto converts the warnings and errors returned by the legacy
+// provider to protobuf diagnostics.
+func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) {
+	for _, w := range warns {
+		diags = AppendProtoDiag(diags, w)
+	}
+
+	for _, e := range errs {
+		diags = AppendProtoDiag(diags, e)
+	}
+
+	return diags
+}
+
+// AppendProtoDiag appends a new diagnostic from a warning string or an error.
+// It also passes through existing proto.Diagnostic values; values of any
+// other type are silently ignored.
+func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic {
+	switch d := d.(type) {
+	case cty.PathError:
+		ap := PathToAttributePath(d.Path)
+		diags = append(diags, &proto.Diagnostic{
+			Severity:  proto.Diagnostic_ERROR,
+			Summary:   d.Error(),
+			Attribute: ap,
+		})
+	case error:
+		diags = append(diags, &proto.Diagnostic{
+			Severity: proto.Diagnostic_ERROR,
+			Summary:  d.Error(),
+		})
+	case string:
+		diags = append(diags, &proto.Diagnostic{
+			Severity: proto.Diagnostic_WARNING,
+			Summary:  d,
+		})
+	case *proto.Diagnostic:
+		diags = append(diags, d)
+	case []*proto.Diagnostic:
+		diags = append(diags, d...)
+	}
+	return diags
+}
+
+// ProtoToDiagnostics converts a list of proto.Diagnostics to tfdiags.Diagnostics.
+func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+	for _, d := range ds {
+		var severity tfdiags.Severity
+
+		switch d.Severity {
+		case proto.Diagnostic_ERROR:
+			severity = tfdiags.Error
+		case proto.Diagnostic_WARNING:
+			severity = tfdiags.Warning
+		}
+
+		var newDiag tfdiags.Diagnostic
+
+		// if there's an attribute path, we need to create an AttributeValue diagnostic
+		if d.Attribute != nil {
+			path := AttributePathToPath(d.Attribute)
+			newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path)
+		} else {
+			newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail)
+		}
+
+		diags = diags.Append(newDiag)
+	}
+
+	return diags
+}
+
+// AttributePathToPath takes the proto-encoded path and converts it to a cty.Path.
+func AttributePathToPath(ap *proto.AttributePath) cty.Path {
+	var p cty.Path
+	for _, step := range ap.Steps {
+		switch selector := step.Selector.(type) {
+		case *proto.AttributePath_Step_AttributeName:
+			p = p.GetAttr(selector.AttributeName)
+		case *proto.AttributePath_Step_ElementKeyString:
+			p = p.Index(cty.StringVal(selector.ElementKeyString))
+		case *proto.AttributePath_Step_ElementKeyInt:
+			p = p.Index(cty.NumberIntVal(selector.ElementKeyInt))
+		}
+	}
+	return p
+}
+
+// PathToAttributePath takes a cty.Path and converts it to a proto-encoded path.
+func PathToAttributePath(p cty.Path) *proto.AttributePath {
+	ap := &proto.AttributePath{}
+	for _, step := range p {
+		switch selector := step.(type) {
+		case cty.GetAttrStep:
+			ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+				Selector: &proto.AttributePath_Step_AttributeName{
+					AttributeName: selector.Name,
+				},
+			})
+		case cty.IndexStep:
+			key := selector.Key
+			switch key.Type() {
+			case cty.String:
+				ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+					Selector: &proto.AttributePath_Step_ElementKeyString{
+						ElementKeyString: key.AsString(),
+					},
+				})
+			case cty.Number:
+				v, _ := key.AsBigFloat().Int64()
+				ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+					Selector: &proto.AttributePath_Step_ElementKeyInt{
+						ElementKeyInt: v,
+					},
+				})
+			default:
+				// We'll bail early if we encounter anything else, and just
+				// return the valid prefix.
+				return ap
+			}
+		}
+	}
+	return ap
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go
new file mode 100644
index 0000000000..6a45f54c95
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go
@@ -0,0 +1,156 @@
+package convert
+
+import (
+	"encoding/json"
+	"reflect"
+	"sort"
+
+	"github.com/hashicorp/terraform/configs/configschema"
+	proto "github.com/hashicorp/terraform/internal/tfplugin5"
+	"github.com/hashicorp/terraform/providers"
+)
+
+// ConfigSchemaToProto takes a *configschema.Block and converts it to a
+// proto.Schema_Block for a grpc response.
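+// Attribute types are carried across the wire as their JSON serialization,
+// mirroring the json.Unmarshal of them in ProtoToConfigSchema below.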
+func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block {
+	block := &proto.Schema_Block{}
+
+	for _, name := range sortedKeys(b.Attributes) {
+		a := b.Attributes[name]
+		attr := &proto.Schema_Attribute{
+			Name:        name,
+			Description: a.Description,
+			Optional:    a.Optional,
+			Computed:    a.Computed,
+			Required:    a.Required,
+			Sensitive:   a.Sensitive,
+		}
+
+		ty, err := json.Marshal(a.Type)
+		if err != nil {
+			panic(err)
+		}
+
+		attr.Type = ty
+
+		block.Attributes = append(block.Attributes, attr)
+	}
+
+	for _, name := range sortedKeys(b.BlockTypes) {
+		b := b.BlockTypes[name]
+		block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b))
+	}
+
+	return block
+}
+
+func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock {
+	var nesting proto.Schema_NestedBlock_NestingMode
+	switch b.Nesting {
+	case configschema.NestingSingle:
+		nesting = proto.Schema_NestedBlock_SINGLE
+	case configschema.NestingGroup:
+		nesting = proto.Schema_NestedBlock_GROUP
+	case configschema.NestingList:
+		nesting = proto.Schema_NestedBlock_LIST
+	case configschema.NestingSet:
+		nesting = proto.Schema_NestedBlock_SET
+	case configschema.NestingMap:
+		nesting = proto.Schema_NestedBlock_MAP
+	default:
+		nesting = proto.Schema_NestedBlock_INVALID
+	}
+	return &proto.Schema_NestedBlock{
+		TypeName: name,
+		Block:    ConfigSchemaToProto(&b.Block),
+		Nesting:  nesting,
+		MinItems: int64(b.MinItems),
+		MaxItems: int64(b.MaxItems),
+	}
+}
+
+// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema.
+func ProtoToProviderSchema(s *proto.Schema) providers.Schema {
+	return providers.Schema{
+		Version: s.Version,
+		Block:   ProtoToConfigSchema(s.Block),
+	}
+}
+
+// ProtoToConfigSchema takes the GetSchema_Block from a grpc response and converts it
+// to a terraform *configschema.Block.
+func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block {
+	block := &configschema.Block{
+		Attributes: make(map[string]*configschema.Attribute),
+		BlockTypes: make(map[string]*configschema.NestedBlock),
+	}
+
+	for _, a := range b.Attributes {
+		attr := &configschema.Attribute{
+			Description: a.Description,
+			Required:    a.Required,
+			Optional:    a.Optional,
+			Computed:    a.Computed,
+			Sensitive:   a.Sensitive,
+		}
+
+		if err := json.Unmarshal(a.Type, &attr.Type); err != nil {
+			panic(err)
+		}
+
+		block.Attributes[a.Name] = attr
+	}
+
+	for _, b := range b.BlockTypes {
+		block.BlockTypes[b.TypeName] = schemaNestedBlock(b)
+	}
+
+	return block
+}
+
+func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock {
+	var nesting configschema.NestingMode
+	switch b.Nesting {
+	case proto.Schema_NestedBlock_SINGLE:
+		nesting = configschema.NestingSingle
+	case proto.Schema_NestedBlock_GROUP:
+		nesting = configschema.NestingGroup
+	case proto.Schema_NestedBlock_LIST:
+		nesting = configschema.NestingList
+	case proto.Schema_NestedBlock_MAP:
+		nesting = configschema.NestingMap
+	case proto.Schema_NestedBlock_SET:
+		nesting = configschema.NestingSet
+	default:
+		// In all other cases we'll leave it as the zero value (invalid) and
+		// let the caller validate it and deal with this.
+	}
+
+	nb := &configschema.NestedBlock{
+		Nesting:  nesting,
+		MinItems: int(b.MinItems),
+		MaxItems: int(b.MaxItems),
+	}
+
+	nested := ProtoToConfigSchema(b.Block)
+	nb.Block = *nested
+	return nb
+}
+
+// sortedKeys returns the lexically sorted keys from the given map. This is
+// used to make schema conversions deterministic. This panics if the map's
+// keys are not strings.
+func sortedKeys(m interface{}) []string {
+	v := reflect.ValueOf(m)
+	keys := make([]string, v.Len())
+
+	mapKeys := v.MapKeys()
+	for i, k := range mapKeys {
+		keys[i] = k.Interface().(string)
+	}
+
+	sort.Strings(keys)
+	return keys
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
index df855a76ca..729e97099e 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
@@ -22,9 +22,43 @@ const ErrorNoSuitableVersion = Error("no suitable version is available")
 // version of Terraform.
 const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform")
 
+// ErrorVersionIncompatible indicates that all of the versions within the
+// constraints are not compatible with the current version of Terraform, though
+// there does exist a version outside of the constraints that is compatible.
+const ErrorVersionIncompatible = Error("incompatible provider version")
+
 // ErrorNoSuchProvider indicates that no provider exists with the given name
 const ErrorNoSuchProvider = Error("no provider exists with the given name")
 
+// ErrorNoVersionCompatibleWithPlatform indicates that all of the available
+// versions that otherwise met constraints are not compatible with the
+// requested platform
+const ErrorNoVersionCompatibleWithPlatform = Error("no available version is compatible with the requested platform")
+
+// ErrorMissingChecksumVerification indicates that either the provider
+// distribution is missing the SHA256SUMS file or the checksum file does
+// not contain a checksum for the binary plugin
+const ErrorMissingChecksumVerification = Error("unable to verify checksum")
+
+// ErrorChecksumVerification indicates that the current checksum of the
+// provider plugin has changed since the initial release and is not trusted
+// to download
+const ErrorChecksumVerification = Error("unexpected plugin checksum")
+
+// ErrorSignatureVerification indicates that the digital signature for a
+// provider distribution could not be verified for one of the following
+// reasons: missing signature file, missing public key, or the signature
+// was not signed by any known key for the publisher
+const ErrorSignatureVerification = Error("unable to verify signature")
+
+// ErrorServiceUnreachable indicates that the network was unable to connect
+// to the registry service
+const ErrorServiceUnreachable = Error("registry service is unreachable")
+
+// ErrorPublicRegistryUnreachable indicates that the network was unable to connect
+// to the public registry in particular, so we can show a link to the statuspage
+const ErrorPublicRegistryUnreachable = Error("registry service is unreachable, check https://status.hashicorp.com/ for status updates")
+
 func (err Error) Error() string {
 	return string(err)
 }
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
index 815640f14e..547db47d88 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
@@ -13,28 +13,27 @@ import (
 	"strconv"
 	"strings"
 
-	"golang.org/x/net/html"
-
+	"github.com/hashicorp/errwrap"
 	getter "github.com/hashicorp/go-getter"
 	multierror "github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/terraform/httpclient"
+	"github.com/hashicorp/terraform/registry"
+	"github.com/hashicorp/terraform/registry/regsrc"
+	"github.com/hashicorp/terraform/registry/response"
+	"github.com/hashicorp/terraform/svchost/disco"
+	"github.com/hashicorp/terraform/tfdiags"
+	tfversion "github.com/hashicorp/terraform/version"
 	"github.com/mitchellh/cli"
 )
 
-// Releases are located by parsing the html listing from releases.hashicorp.com.
-//
-// The URL for releases follows the pattern:
-// https://releases.hashicorp.com/terraform-provider-name//terraform-provider-name___.
-//
-// The plugin protocol version will be saved with the release and returned in
-// the header X-TERRAFORM_PROTOCOL_VERSION.
+// Releases are located by querying the Terraform Registry.
 
 const protocolVersionHeader = "x-terraform-protocol-version"
 
-var releaseHost = "https://releases.hashicorp.com"
-
 var httpClient *http.Client
 
+var errVersionNotFound = errors.New("version not found")
+
 func init() {
 	httpClient = httpclient.New()
 
@@ -50,7 +49,7 @@ func init() {
 // An Installer maintains a local cache of plugins by downloading plugins
 // from an online repository.
 type Installer interface {
-	Get(name string, req Constraints) (PluginMeta, error)
+	Get(name string, req Constraints) (PluginMeta, tfdiags.Diagnostics, error)
 	PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error)
 }
 
@@ -79,6 +78,13 @@ type ProviderInstaller struct {
 	SkipVerify bool
 
 	Ui cli.Ui // Ui for output
+
+	// Services is a required *disco.Disco, which may have services and
+	// credentials pre-loaded.
+	Services *disco.Disco
+
+	// registry client
+	registry *registry.Client
 }
 
 // Get is part of an implementation of type Installer, and attempts to download
@@ -100,96 +106,170 @@ type ProviderInstaller struct {
 // are produced under the assumption that if presented to the user they will
 // be presented alongside context about what is being installed, and thus the
 // error messages do not redundantly include such information.
-func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, error) {
-	versions, err := i.listProviderVersions(provider)
+func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) {
+	var diags tfdiags.Diagnostics
+
+	// a little bit of initialization.
+ if i.OS == "" { + i.OS = runtime.GOOS + } + if i.Arch == "" { + i.Arch = runtime.GOARCH + } + if i.registry == nil { + i.registry = registry.NewClient(i.Services, nil) + } + + // get a full listing of versions for the requested provider + allVersions, err := i.listProviderVersions(provider) + // TODO: return multiple errors if err != nil { - return PluginMeta{}, err + log.Printf("[DEBUG] %s", err) + if registry.IsServiceUnreachable(err) { + registryHost, err := i.hostname() + if err == nil && registryHost == regsrc.PublicRegistryHost.Raw { + return PluginMeta{}, diags, ErrorPublicRegistryUnreachable + } + return PluginMeta{}, diags, ErrorServiceUnreachable + } + if registry.IsServiceNotProvided(err) { + return PluginMeta{}, diags, err + } + return PluginMeta{}, diags, ErrorNoSuchProvider } - if len(versions) == 0 { - return PluginMeta{}, ErrorNoSuitableVersion + // Add any warnings from the response to diags + for _, warning := range allVersions.Warnings { + hostname, err := i.hostname() + if err != nil { + return PluginMeta{}, diags, err + } + diag := tfdiags.SimpleWarning(fmt.Sprintf("%s: %s", hostname, warning)) + diags = diags.Append(diag) } - versions = allowedVersions(versions, req) + if len(allVersions.Versions) == 0 { + return PluginMeta{}, diags, ErrorNoSuitableVersion + } + providerSource := allVersions.ID + + // Filter the list of plugin versions to those which meet the version constraints + versions := allowedVersions(allVersions, req) if len(versions) == 0 { - return PluginMeta{}, ErrorNoSuitableVersion + return PluginMeta{}, diags, ErrorNoSuitableVersion } - // sort them newest to oldest - Versions(versions).Sort() + // sort them newest to oldest. The newest version wins! + response.ProviderVersionCollection(versions).Sort() - // Ensure that our installation directory exists - err = os.MkdirAll(i.Dir, os.ModePerm) - if err != nil { - return PluginMeta{}, fmt.Errorf("failed to create plugin dir %s: %s", i.Dir, err) + // if the chosen provider version does not support the requested platform, + // filter the list of acceptable versions to those that support that platform + if err := i.checkPlatformCompatibility(versions[0]); err != nil { + versions = i.platformCompatibleVersions(versions) + if len(versions) == 0 { + return PluginMeta{}, diags, ErrorNoVersionCompatibleWithPlatform + } } - // take the first matching plugin we find - for _, v := range versions { - url := i.providerURL(provider, v.String()) + // we now have a winning platform-compatible version + versionMeta := versions[0] + v := VersionStr(versionMeta.Version).MustParse() - if !i.SkipVerify { - sha256, err := i.getProviderChecksum(provider, v.String()) - if err != nil { - return PluginMeta{}, err - } + // check protocol compatibility + if err := i.checkPluginProtocol(versionMeta); err != nil { + closestMatch, err := i.findClosestProtocolCompatibleVersion(allVersions.Versions) + if err != nil { + // No operation here if we can't find a version with compatible protocol + return PluginMeta{}, diags, err + } - // add the checksum parameter for go-getter to verify the download for us. 
-		if sha256 != "" {
-			url = url + "?checksum=sha256:" + sha256
-		}
+		// Prompt version suggestion to UI based on closest protocol match
+		var errMsg string
+		closestVersion := VersionStr(closestMatch.Version).MustParse()
+		if v.NewerThan(closestVersion) {
+			errMsg = providerProtocolTooNew
+		} else {
+			errMsg = providerProtocolTooOld
 		}
 
-		log.Printf("[DEBUG] fetching provider info for %s version %s", provider, v)
-		if checkPlugin(url, i.PluginProtocolVersion) {
-			i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", provider, v.String()))
-			log.Printf("[DEBUG] getting provider %q version %q", provider, v)
-			err := i.install(provider, v, url)
-			if err != nil {
-				return PluginMeta{}, err
-			}
+		constraintStr := req.String()
+		if constraintStr == "" {
+			constraintStr = "(any version)"
+		}
 
-			// Find what we just installed
-			// (This is weird, because go-getter doesn't directly return
-			// information about what was extracted, and we just extracted
-			// the archive directly into a shared dir here.)
-			log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider, v)
-			metas := FindPlugins("provider", []string{i.Dir})
-			log.Printf("[DEBUG] all plugins found %#v", metas)
-			metas, _ = metas.ValidateVersions()
-			metas = metas.WithName(provider).WithVersion(v)
-			log.Printf("[DEBUG] filtered plugins %#v", metas)
-			if metas.Count() == 0 {
-				// This should never happen. Suggests that the release archive
-				// contains an executable file whose name doesn't match the
-				// expected convention.
-				return PluginMeta{}, fmt.Errorf(
-					"failed to find installed plugin version %s; this is a bug in Terraform and should be reported",
-					v,
-				)
-			}
+		return PluginMeta{}, diags, errwrap.Wrap(ErrorVersionIncompatible, fmt.Errorf(
+			errMsg, provider, v.String(), tfversion.String(),
+			closestVersion.String(), closestVersion.MinorUpgradeConstraintStr(), constraintStr))
+	}
 
-			if metas.Count() > 1 {
-				// This should also never happen, and suggests that a
-				// particular version was re-released with a different
-				// executable filename. We consider releases as immutable, so
-				// this is an error.
-				return PluginMeta{}, fmt.Errorf(
-					"multiple plugins installed for version %s; this is a bug in Terraform and should be reported",
-					v,
-				)
-			}
+	downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version)
+	if err != nil {
+		return PluginMeta{}, diags, err
+	}
+	providerURL := downloadURLs.DownloadURL
+
+	if !i.SkipVerify {
+		// Terraform verifies the integrity of a provider release before downloading
+		// the plugin binary. The digital signature (SHA256SUMS.sig) on the
+		// release distribution (SHA256SUMS) is verified with the public key of the
+		// publisher provided in the Terraform Registry response, ensuring that
+		// everything is as intended by the publisher. The checksum of the provider
+		// plugin is expected to appear in the SHA256SUMS file and is double-checked
+		// to match the checksum of the release originally published to the Registry.
+		// This enforces immutability of releases between the Registry and the
+		// plugin's host location. Lastly, the integrity of the downloaded binary is
+		// verified against this signed, Registry-published checksum.
+		sha256, err := i.getProviderChecksum(downloadURLs)
+		if err != nil {
+			return PluginMeta{}, diags, err
+		}
 
-			// By now we know we have exactly one meta, and so "Newest" will
-			// return that one.
-			return metas.Newest(), nil
+		// add the checksum parameter for go-getter to verify the download for us.
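+		// The resulting URL then carries the checksum inline, e.g.
+		// (values hypothetical):
+		//
+		//	https://releases.example.com/terraform-provider-random_2.0.0_linux_amd64.zip?checksum=sha256:5f9c7aa7...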
+ if sha256 != "" { + providerURL = providerURL + "?checksum=sha256:" + sha256 } + } + + printedProviderName := fmt.Sprintf("%s (%s)", provider, providerSource) + i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", printedProviderName, versionMeta.Version)) + log.Printf("[DEBUG] getting provider %q version %q", printedProviderName, versionMeta.Version) + err = i.install(provider, v, providerURL) + if err != nil { + return PluginMeta{}, diags, err + } + + // Find what we just installed + // (This is weird, because go-getter doesn't directly return + // information about what was extracted, and we just extracted + // the archive directly into a shared dir here.) + log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider, versionMeta.Version) + metas := FindPlugins("provider", []string{i.Dir}) + log.Printf("[DEBUG] all plugins found %#v", metas) + metas, _ = metas.ValidateVersions() + metas = metas.WithName(provider).WithVersion(v) + log.Printf("[DEBUG] filtered plugins %#v", metas) + if metas.Count() == 0 { + // This should never happen. Suggests that the release archive + // contains an executable file whose name doesn't match the + // expected convention. + return PluginMeta{}, diags, fmt.Errorf( + "failed to find installed plugin version %s; this is a bug in Terraform and should be reported", + versionMeta.Version, + ) + } - log.Printf("[INFO] incompatible ProtocolVersion for %s version %s", provider, v) + if metas.Count() > 1 { + // This should also never happen, and suggests that a + // particular version was re-released with a different + // executable filename. We consider releases as immutable, so + // this is an error. + return PluginMeta{}, diags, fmt.Errorf( + "multiple plugins installed for version %s; this is a bug in Terraform and should be reported", + versionMeta.Version, + ) } - return PluginMeta{}, ErrorNoVersionCompatible + // By now we know we have exactly one meta, and so "Newest" will + // return that one. + return metas.Newest(), diags, nil } func (i *ProviderInstaller) install(provider string, version Version, url string) error { @@ -215,6 +295,14 @@ func (i *ProviderInstaller) install(provider string, version Version, url string // normal resolution machinery can find it. filename := filepath.Base(cached) targetPath := filepath.Join(i.Dir, filename) + // check if the target dir exists, and create it if not + var err error + if _, StatErr := os.Stat(i.Dir); os.IsNotExist(StatErr) { + err = os.Mkdir(i.Dir, 0700) + } + if err != nil { + return err + } log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider, version, targetPath, cached) @@ -280,7 +368,6 @@ func (i *ProviderInstaller) install(provider string, version Version, url string return err } } - return nil } @@ -316,182 +403,222 @@ func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaS return removed, errs } -// Plugins are referred to by the short name, but all URLs and files will use -// the full name prefixed with terraform-- -func (i *ProviderInstaller) providerName(name string) string { - return "terraform-provider-" + name -} +func (i *ProviderInstaller) getProviderChecksum(resp *response.TerraformProviderPlatformLocation) (string, error) { + // Get SHA256SUMS file. 
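+	// The SHA256SUMS file is a plain sha256sum-style listing of
+	// "<hex digest>  <filename>" lines, e.g. (contents hypothetical):
+	//
+	//	5f9c7aa7...  terraform-provider-random_2.0.0_linux_amd64.zip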
+	shasums, err := getFile(resp.ShasumsURL)
+	if err != nil {
+		log.Printf("[ERROR] error fetching checksums from %q: %s", resp.ShasumsURL, err)
+		return "", ErrorMissingChecksumVerification
+	}
 
-func (i *ProviderInstaller) providerFileName(name, version string) string {
-	os := i.OS
-	arch := i.Arch
-	if os == "" {
-		os = runtime.GOOS
+	// Get SHA256SUMS.sig file.
+	signature, err := getFile(resp.ShasumsSignatureURL)
+	if err != nil {
+		log.Printf("[ERROR] error fetching checksums signature from %q: %s", resp.ShasumsSignatureURL, err)
+		return "", ErrorSignatureVerification
 	}
-	if arch == "" {
-		arch = runtime.GOARCH
+
+	// Verify the GPG signature returned from the Registry.
+	asciiArmor := resp.SigningKeys.GPGASCIIArmor()
+	signer, err := verifySig(shasums, signature, asciiArmor)
+	if err != nil {
+		log.Printf("[ERROR] error verifying signature: %s", err)
+		return "", ErrorSignatureVerification
 	}
-	return fmt.Sprintf("%s_%s_%s_%s.zip", i.providerName(name), version, os, arch)
-}
 
-// providerVersionsURL returns the path to the released versions directory for the provider:
-// https://releases.hashicorp.com/terraform-provider-name/
-func (i *ProviderInstaller) providerVersionsURL(name string) string {
-	return releaseHost + "/" + i.providerName(name) + "/"
-}
+	// Also verify the GPG signature against the HashiCorp public key. This is
+	// a temporary additional check until a more robust key verification
+	// process is added in a future release.
+	_, err = verifySig(shasums, signature, HashicorpPublicKey)
+	if err != nil {
+		log.Printf("[ERROR] error verifying signature against HashiCorp public key: %s", err)
+		return "", ErrorSignatureVerification
+	}
 
-// providerURL returns the full path to the provider file, using the current OS
-// and ARCH:
-// .../terraform-provider-name_<version>/terraform-provider-name_<version>_<os>_<arch>.<ext>
-func (i *ProviderInstaller) providerURL(name, version string) string {
-	return fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, i.providerFileName(name, version))
-}
+	// Display the identity of the GPG key that successfully verified the
+	// signature. This could also be used to display to the user with i.Ui.Info().
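+	// The identity strings are the OpenPGP user IDs attached to the signing
+	// key, e.g. (hypothetical): "Example Publisher <security@example.com>".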
+	identities := []string{}
+	for k := range signer.Identities {
+		identities = append(identities, k)
+	}
+	identity := strings.Join(identities, ", ")
+	log.Printf("[DEBUG] verified GPG signature with key from %s", identity)
+
+	// Extract checksum for this os/arch platform binary and verify against Registry
+	checksum := checksumForFile(shasums, resp.Filename)
+	if checksum == "" {
+		log.Printf("[ERROR] missing checksum for %s from source %s", resp.Filename, resp.ShasumsURL)
+		return "", ErrorMissingChecksumVerification
+	} else if checksum != resp.Shasum {
+		log.Printf("[ERROR] unexpected checksum for %s from source %q", resp.Filename, resp.ShasumsURL)
+		return "", ErrorChecksumVerification
+	}
 
-func (i *ProviderInstaller) providerChecksumURL(name, version string) string {
-	fileName := fmt.Sprintf("%s_%s_SHA256SUMS", i.providerName(name), version)
-	u := fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, fileName)
-	return u
+	return checksum, nil
 }
 
-func (i *ProviderInstaller) getProviderChecksum(name, version string) (string, error) {
-	checksums, err := getPluginSHA256SUMs(i.providerChecksumURL(name, version))
+func (i *ProviderInstaller) hostname() (string, error) {
+	provider := regsrc.NewTerraformProvider("", i.OS, i.Arch)
+	svchost, err := provider.SvcHost()
 	if err != nil {
 		return "", err
 	}
 
-	return checksumForFile(checksums, i.providerFileName(name, version)), nil
+	return svchost.ForDisplay(), nil
 }
 
-// Return the plugin version by making a HEAD request to the provided url.
-// If the header is not present, we assume the latest version will be
-// compatible, and leave the check for discovery or execution.
-func checkPlugin(url string, pluginProtocolVersion uint) bool {
-	resp, err := httpClient.Head(url)
-	if err != nil {
-		log.Printf("[ERROR] error fetching plugin headers: %s", err)
-		return false
-	}
+// list all versions available for the named provider
+func (i *ProviderInstaller) listProviderVersions(name string) (*response.TerraformProviderVersions, error) {
+	provider := regsrc.NewTerraformProvider(name, i.OS, i.Arch)
+	versions, err := i.registry.TerraformProviderVersions(provider)
+	return versions, err
+}
 
-	if resp.StatusCode != http.StatusOK {
-		log.Println("[ERROR] non-200 status fetching plugin headers:", resp.Status)
-		return false
+func (i *ProviderInstaller) listProviderDownloadURLs(name, version string) (*response.TerraformProviderPlatformLocation, error) {
+	urls, err := i.registry.TerraformProviderLocation(regsrc.NewTerraformProvider(name, i.OS, i.Arch), version)
+	if urls == nil {
+		return nil, fmt.Errorf("no download urls found for provider %s", name)
 	}
+	return urls, err
+}
+
+// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match.
+// Prerelease versions are filtered.
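+//
+// As a worked example (data hypothetical): with an installer protocol of 5
+// and candidate versions advertising protocols ["4.0"], ["5.0"], and
+// ["5.0", "6.0"], the first is skipped, the version advertising ["5.0"]
+// becomes the earliest compatible match, and the one advertising
+// ["5.0", "6.0"] becomes the latest; the closer of the two is returned.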
+func (i *ProviderInstaller) findClosestProtocolCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) { + // Loop through all the provider versions to find the earliest and latest + // versions that match the installer protocol to then select the closest of the two + var latest, earliest *response.TerraformProviderVersion + for _, version := range versions { + // Prereleases are filtered and will not be suggested + v, err := VersionStr(version.Version).Parse() + if err != nil || v.IsPrerelease() { + continue + } - proto := resp.Header.Get(protocolVersionHeader) - if proto == "" { - // The header isn't present, but we don't make this error fatal since - // the latest version will probably work. - log.Printf("[WARN] missing %s from: %s", protocolVersionHeader, url) - return true + if err := i.checkPluginProtocol(version); err == nil { + if earliest == nil { + // Found the first provider version with compatible protocol + earliest = version + } + // Update the latest protocol compatible version + latest = version + } + } + if earliest == nil { + // No compatible protocol was found for any version + return nil, ErrorNoVersionCompatible } - protoVersion, err := strconv.Atoi(proto) + // Convert protocols to comparable types + protoString := strconv.Itoa(int(i.PluginProtocolVersion)) + protocolVersion, err := VersionStr(protoString).Parse() if err != nil { - log.Printf("[ERROR] invalid ProtocolVersion: %s", proto) - return false + return nil, fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) } - return protoVersion == int(pluginProtocolVersion) -} - -// list the version available for the named plugin -func (i *ProviderInstaller) listProviderVersions(name string) ([]Version, error) { - versions, err := listPluginVersions(i.providerVersionsURL(name)) + earliestVersionProtocol, err := VersionStr(earliest.Protocols[0]).Parse() if err != nil { - // listPluginVersions returns a verbose error message indicating - // what was being accessed and what failed return nil, err } - return versions, nil -} - -var errVersionNotFound = errors.New("version not found") -// take the list of available versions for a plugin, and filter out those that -// don't fit the constraints. -func allowedVersions(available []Version, required Constraints) []Version { - var allowed []Version - - for _, v := range available { - if required.Allows(v) { - allowed = append(allowed, v) - } + // Compare installer protocol version with the first protocol listed of the earliest match + // [A, B] where A is assumed the earliest compatible major version of the protocol pair + if protocolVersion.NewerThan(earliestVersionProtocol) { + // Provider protocols are too old, the closest version is the earliest compatible version + return earliest, nil } - return allowed + // Provider protocols are too new, the closest version is the latest compatible version + return latest, nil } -// return a list of the plugin versions at the given URL -func listPluginVersions(url string) ([]Version, error) { - resp, err := httpClient.Get(url) +func (i *ProviderInstaller) checkPluginProtocol(versionMeta *response.TerraformProviderVersion) error { + // TODO: should this be a different error? 
We should probably differentiate between
+	// no compatible versions and no protocol versions listed at all
+	if len(versionMeta.Protocols) == 0 {
+		return fmt.Errorf("no plugin protocol versions listed")
+	}
+
+	protoString := strconv.Itoa(int(i.PluginProtocolVersion))
+	protocolVersion, err := VersionStr(protoString).Parse()
+	if err != nil {
+		return fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion)
+	}
+	protocolConstraint, err := protocolVersion.MinorUpgradeConstraintStr().Parse()
+	if err != nil {
+		// This should not fail if the preceding function succeeded.
+		return fmt.Errorf("invalid plugin protocol version: %q", protocolVersion.String())
+	}
 
-	defer resp.Body.Close()
-	if resp.StatusCode != http.StatusOK {
-		body, _ := ioutil.ReadAll(resp.Body)
-		log.Printf("[ERROR] failed to fetch plugin versions from %s\n%s\n%s", url, resp.Status, body)
-
-		switch resp.StatusCode {
-		case http.StatusNotFound, http.StatusForbidden:
-			// These are treated as indicative of the given name not being
-			// a valid provider name at all.
-			return nil, ErrorNoSuchProvider
-
-		default:
-			// All other errors are assumed to be operational problems.
-			return nil, fmt.Errorf("error accessing %s: %s", url, resp.Status)
+	for _, p := range versionMeta.Protocols {
+		proPro, err := VersionStr(p).Parse()
+		if err != nil {
+			// invalid protocol reported by the registry. Move along.
+			log.Printf("[WARN] invalid provider protocol version %q found in the registry", versionMeta.Version)
+			continue
+		}
+		// success!
+		if protocolConstraint.Allows(proPro) {
+			return nil
 		}
-	}
-	body, err := html.Parse(resp.Body)
-	if err != nil {
-		log.Fatal(err)
+	}
+	return ErrorNoVersionCompatible
+}
+
+// REVIEWER QUESTION (again): this ends up swallowing a bunch of errors from
+// checkPluginProtocol. Do they need to be percolated up better, or would
+// debug messages suffice in these situations?
+func (i *ProviderInstaller) findPlatformCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) {
+	for _, version := range versions {
+		if err := i.checkPlatformCompatibility(version); err == nil {
+			return version, nil
+		}
 	}
 
-	names := []string{}
+	return nil, ErrorNoVersionCompatibleWithPlatform
+}
 
-	// all we need to do is list links on the directory listing page that look like plugins
-	var f func(*html.Node)
-	f = func(n *html.Node) {
-		if n.Type == html.ElementNode && n.Data == "a" {
-			c := n.FirstChild
-			if c != nil && c.Type == html.TextNode && strings.HasPrefix(c.Data, "terraform-") {
-				names = append(names, c.Data)
-				return
-			}
-		}
-		for c := n.FirstChild; c != nil; c = c.NextSibling {
-			f(c)
+// platformCompatibleVersions returns a list of provider versions that are
+// compatible with the requested platform.
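+//
+// For example (data hypothetical): when running on linux_amd64, a version
+// whose Platforms list includes {OS: "linux", Arch: "amd64"} is kept, while
+// a version published only for darwin_amd64 is filtered out.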
+func (i *ProviderInstaller) platformCompatibleVersions(versions []*response.TerraformProviderVersion) []*response.TerraformProviderVersion { + var v []*response.TerraformProviderVersion + for _, version := range versions { + if err := i.checkPlatformCompatibility(version); err == nil { + v = append(v, version) } } - f(body) + return v +} - return versionsFromNames(names), nil +func (i *ProviderInstaller) checkPlatformCompatibility(versionMeta *response.TerraformProviderVersion) error { + if len(versionMeta.Platforms) == 0 { + return fmt.Errorf("no supported provider platforms listed") + } + for _, p := range versionMeta.Platforms { + if p.Arch == i.Arch && p.OS == i.OS { + return nil + } + } + return fmt.Errorf("version %s does not support the requested platform %s_%s", versionMeta.Version, i.OS, i.Arch) } -// parse the list of directory names into a sorted list of available versions -func versionsFromNames(names []string) []Version { - var versions []Version - for _, name := range names { - parts := strings.SplitN(name, "_", 2) - if len(parts) == 2 && parts[1] != "" { - v, err := VersionStr(parts[1]).Parse() - if err != nil { - // filter invalid versions scraped from the page - log.Printf("[WARN] invalid version found for %q: %s", name, err) - continue - } +// take the list of available versions for a plugin, and filter out those that +// don't fit the constraints. +func allowedVersions(available *response.TerraformProviderVersions, required Constraints) []*response.TerraformProviderVersion { + var allowed []*response.TerraformProviderVersion - versions = append(versions, v) + for _, v := range available.Versions { + version, err := VersionStr(v.Version).Parse() + if err != nil { + log.Printf("[WARN] invalid version found for %q: %s", available.ID, err) + continue + } + if required.Allows(version) { + allowed = append(allowed, v) } } - - return versions + return allowed } func checksumForFile(sums []byte, name string) string { @@ -504,27 +631,6 @@ func checksumForFile(sums []byte, name string) string { return "" } -// fetch the SHA256SUMS file provided, and verify its signature. -func getPluginSHA256SUMs(sumsURL string) ([]byte, error) { - sigURL := sumsURL + ".sig" - - sums, err := getFile(sumsURL) - if err != nil { - return nil, fmt.Errorf("error fetching checksums: %s", err) - } - - sig, err := getFile(sigURL) - if err != nil { - return nil, fmt.Errorf("error fetching checksums signature: %s", err) - } - - if err := verifySig(sums, sig); err != nil { - return nil, err - } - - return sums, nil -} - func getFile(url string) ([]byte, error) { resp, err := httpClient.Get(url) if err != nil { @@ -543,6 +649,41 @@ func getFile(url string) ([]byte, error) { return data, nil } -func GetReleaseHost() string { - return releaseHost -} +// providerProtocolTooOld is a message sent to the CLI UI if the provider's +// supported protocol versions are too old for the user's version of terraform, +// but an older version of the provider is compatible. +const providerProtocolTooOld = ` +[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] + +Provider version %s is the earliest compatible version. Select it with +the following version constraint: + + version = %q + +Terraform checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on +compatibility between provider and Terraform versions. 
+` + +// providerProtocolTooNew is a message sent to the CLI UI if the provider's +// supported protocol versions are too new for the user's version of terraform, +// and the user could either upgrade terraform or choose an older version of the +// provider +const providerProtocolTooNew = ` +[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] + +Provider version %s is the latest compatible version. Select it with +the following constraint: + + version = %q + +Terraform checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on +compatibility between provider and Terraform versions. + +Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases. +` diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go new file mode 100644 index 0000000000..4622ca0545 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go @@ -0,0 +1,34 @@ +package discovery + +// HashicorpPublicKey is the HashiCorp public key, also available at +// https://www.hashicorp.com/security +const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f +W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq +fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA +3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca +KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k +SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 +cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG +CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n +Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i +SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi +psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w +sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO +klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW +WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 +wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j +2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM +skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo +mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y +0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA +CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc +z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP +0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG +unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ +EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ +oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C +=LYpS +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go index 181ea1fcd6..3a992892df 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go @@ -63,7 +63,7 @@ func (s PluginMetaSet) WithName(name string) PluginMetaSet { // WithVersion returns the subset of metas that have the given version. 
//
 // This should be used only with the "valid" result from ValidateVersions;
-// it will ignore any plugin metas that have a invalid version strings.
+// it will ignore any plugin metas that have invalid version strings.
 func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet {
 	ns := make(PluginMetaSet)
 	for p := range s {
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
index 75430fdd60..0466ab25ae 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
@@ -4,6 +4,12 @@ import (
 	"bytes"
 )
 
+// PluginInstallProtocolVersion is the protocol version Terraform core
+// supports when communicating with plugin servers. It is used during
+// plugin discovery against the Terraform registry, in addition to
+// any specified plugin version constraints.
+const PluginInstallProtocolVersion = 5
+
 // PluginRequirements describes a set of plugins (assumed to be of a consistent
 // kind) that are required to exist and have versions within the given
 // corresponding sets.
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
index b6686a5d5c..7bbae50c38 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
@@ -2,7 +2,6 @@ package discovery
 
 import (
 	"bytes"
-	"log"
 	"strings"
 
 	"golang.org/x/crypto/openpgp"
@@ -10,44 +9,11 @@ import (
 
 // Verify the data using the provided openpgp detached signature and the
 // embedded hashicorp public key.
-func verifySig(data, sig []byte) error {
-	el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(hashiPublicKey))
+func verifySig(data, sig []byte, armor string) (*openpgp.Entity, error) {
+	el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armor))
 	if err != nil {
-		log.Fatal(err)
+		return nil, err
 	}
 
-	_, err = openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig))
-	return err
+	return openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig))
 }
-
-// this is the public key that signs the checksums file for releases.
-const hashiPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f -W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq -fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA -3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca -KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k -SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 -cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG -CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n -Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i -SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi -psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w -sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO -klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW -WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 -wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j -2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM -skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo -mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y -0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA -CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc -z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP -0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG -unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ -EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ -oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C -=LYpS ------END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go index 8fad58d67f..4311d51076 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go @@ -55,6 +55,11 @@ func (v Version) Equal(other Version) bool { return v.raw.Equal(other.raw) } +// IsPrerelease determines if version is a prerelease +func (v Version) IsPrerelease() bool { + return v.raw.Prerelease() != "" +} + // MinorUpgradeConstraintStr returns a ConstraintStr that would permit // minor upgrades relative to the receiving version. func (v Version) MinorUpgradeConstraintStr() ConstraintStr { diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go index 0aefd759fd..de02f5ec5b 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go @@ -36,6 +36,11 @@ type Constraints struct { raw version.Constraints } +// NewConstraints creates a Constraints based on a version.Constraints. 
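+//
+// A minimal usage sketch (constraint string hypothetical):
+//
+//	raw, _ := version.NewConstraint(">= 1.0, < 2.0")
+//	constraints := NewConstraints(raw)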
+func NewConstraints(c version.Constraints) Constraints {
+	return Constraints{c}
+}
+
 // AllVersions is a Constraints containing all versions
 var AllVersions Constraints
 
diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
new file mode 100644
index 0000000000..ff1db04cdc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
@@ -0,0 +1,563 @@
+package plugin
+
+import (
+	"context"
+	"errors"
+	"io"
+	"log"
+	"sync"
+
+	"github.com/zclconf/go-cty/cty"
+
+	plugin "github.com/hashicorp/go-plugin"
+	proto "github.com/hashicorp/terraform/internal/tfplugin5"
+	"github.com/hashicorp/terraform/plugin/convert"
+	"github.com/hashicorp/terraform/providers"
+	"github.com/hashicorp/terraform/version"
+	"github.com/zclconf/go-cty/cty/msgpack"
+	"google.golang.org/grpc"
+)
+
+// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package.
+type GRPCProviderPlugin struct {
+	plugin.Plugin
+	GRPCProvider func() proto.ProviderServer
+}
+
+func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
+	return &GRPCProvider{
+		client: proto.NewProviderClient(c),
+		ctx:    ctx,
+	}, nil
+}
+
+func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
+	proto.RegisterProviderServer(s, p.GRPCProvider())
+	return nil
+}
+
+// GRPCProvider handles the client, or core, side of the plugin rpc connection.
+// The GRPCProvider methods are mostly a translation layer between the
+// terraform provider types and the grpc proto types, directly converting
+// between the two.
+type GRPCProvider struct {
+	// PluginClient provides a reference to the plugin.Client which controls the plugin process.
+	// This allows the GRPCProvider a way to shut down the plugin process.
+	PluginClient *plugin.Client
+
+	// TestListener contains a net.Conn to close when the GRPCProvider is being
+	// used in an end-to-end test of a provider.
+	TestListener io.Closer
+
+	// Proto client used to make the grpc service calls.
+	client proto.ProviderClient
+
+	// this context is created by the plugin package, and is canceled when the
+	// plugin process ends.
+	ctx context.Context
+
+	// schema stores the schema for this provider. This is used to properly
+	// serialize the state for requests.
+	mu      sync.Mutex
+	schemas providers.GetSchemaResponse
+}
+
+// getSchema is used internally to get the saved provider schema. The schema
+// should have already been fetched from the provider, but we have to
+// synchronize access to avoid being called concurrently with GetSchema.
+func (p *GRPCProvider) getSchema() providers.GetSchemaResponse {
+	p.mu.Lock()
+	// unlock inline in case GetSchema needs to be called
+	if p.schemas.Provider.Block != nil {
+		p.mu.Unlock()
+		return p.schemas
+	}
+	p.mu.Unlock()
+
+	// the schema should have been fetched already, but give it another shot
+	// just in case things are being called out of order. This may happen for
+	// tests.
+	schemas := p.GetSchema()
+	if schemas.Diagnostics.HasErrors() {
+		panic(schemas.Diagnostics.Err())
+	}
+
+	return schemas
+}
+
+// getResourceSchema is a helper to extract the schema for a resource, and
+// panics if the schema is not available.
+func (p *GRPCProvider) getResourceSchema(name string) providers.Schema {
+	schema := p.getSchema()
+	resSchema, ok := schema.ResourceTypes[name]
+	if !ok {
+		panic("unknown resource type " + name)
+	}
+	return resSchema
+}
+
+// getDatasourceSchema is a helper to extract the schema for a datasource, and
+// panics if that schema is not available.
+func (p *GRPCProvider) getDatasourceSchema(name string) providers.Schema {
+	schema := p.getSchema()
+	dataSchema, ok := schema.DataSources[name]
+	if !ok {
+		panic("unknown data source " + name)
+	}
+	return dataSchema
+}
+
+func (p *GRPCProvider) GetSchema() (resp providers.GetSchemaResponse) {
+	log.Printf("[TRACE] GRPCProvider: GetSchema")
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.schemas.Provider.Block != nil {
+		return p.schemas
+	}
+
+	resp.ResourceTypes = make(map[string]providers.Schema)
+	resp.DataSources = make(map[string]providers.Schema)
+
+	// Some providers may generate quite large schemas, and the internal default
+	// grpc response size limit is 4MB. 64MB should cover most any use case, and
+	// if we get providers nearing that we may want to consider a finer-grained
+	// API to fetch individual resource schemas.
+	// Note: this option is marked as EXPERIMENTAL in the grpc API.
+	const maxRecvSize = 64 << 20
+	protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
+	if err != nil {
+		resp.Diagnostics = resp.Diagnostics.Append(err)
+		return resp
+	}
+
+	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
+
+	if protoResp.Provider == nil {
+		resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema"))
+		return resp
+	}
+
+	resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider)
+
+	for name, res := range protoResp.ResourceSchemas {
+		resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res)
+	}
+
+	for name, data := range protoResp.DataSourceSchemas {
+		resp.DataSources[name] = convert.ProtoToProviderSchema(data)
+	}
+
+	p.schemas = resp
+
+	return resp
+}
+
+func (p *GRPCProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) {
+	log.Printf("[TRACE] GRPCProvider: PrepareProviderConfig")
+
+	schema := p.getSchema()
+	ty := schema.Provider.Block.ImpliedType()
+
+	mp, err := msgpack.Marshal(r.Config, ty)
+	if err != nil {
+		resp.Diagnostics = resp.Diagnostics.Append(err)
+		return resp
+	}
+
+	protoReq := &proto.PrepareProviderConfig_Request{
+		Config: &proto.DynamicValue{Msgpack: mp},
+	}
+
+	protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq)
+	if err != nil {
+		resp.Diagnostics = resp.Diagnostics.Append(err)
+		return resp
+	}
+
+	config := cty.NullVal(ty)
+	if protoResp.PreparedConfig != nil {
+		config, err = msgpack.Unmarshal(protoResp.PreparedConfig.Msgpack, ty)
+		if err != nil {
+			resp.Diagnostics = resp.Diagnostics.Append(err)
+			return resp
+		}
+	}
+	resp.PreparedConfig = config
+
+	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
+	return resp
+}
+
+func (p *GRPCProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) {
+	log.Printf("[TRACE] GRPCProvider: ValidateResourceTypeConfig")
+	resourceSchema := p.getResourceSchema(r.TypeName)
+
+	mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType())
+	if err != nil {
+		resp.Diagnostics =
resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateResourceTypeConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) { + log.Printf("[TRACE] GRPCProvider: ValidateDataSourceConfig") + + dataSchema := p.getDatasourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateDataSourceConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + log.Printf("[TRACE] GRPCProvider: UpgradeResourceState") + + resSchema := p.getResourceSchema(r.TypeName) + + protoReq := &proto.UpgradeResourceState_Request{ + TypeName: r.TypeName, + Version: int64(r.Version), + RawState: &proto.RawState{ + Json: r.RawStateJSON, + Flatmap: r.RawStateFlatmap, + }, + } + + protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.UpgradedState != nil { + state, err = msgpack.Unmarshal(protoResp.UpgradedState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + + resp.UpgradedState = state + return resp +} + +func (p *GRPCProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) { + log.Printf("[TRACE] GRPCProvider: Configure") + + schema := p.getSchema() + + var mp []byte + + // we don't have anything to marshal if there's no config + mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.Configure_Request{ + TerraformVersion: version.Version, + Config: &proto.DynamicValue{ + Msgpack: mp, + }, + } + + protoResp, err := p.client.Configure(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) Stop() error { + log.Printf("[TRACE] GRPCProvider: Stop") + + resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request)) + if err != nil { + return err + } + + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + log.Printf("[TRACE] GRPCProvider: ReadResource") + + resSchema := 
p.getResourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadResource_Request{ + TypeName: r.TypeName, + CurrentState: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ReadResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.NewState != nil { + state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.NewState = state + + return resp +} + +func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + log.Printf("[TRACE] GRPCProvider: PlanResourceChange") + + resSchema := p.getResourceSchema(r.TypeName) + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.PlanResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + ProposedNewState: &proto.DynamicValue{Msgpack: propMP}, + PriorPrivate: r.PriorPrivate, + } + + protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.PlannedState != nil { + state, err = msgpack.Unmarshal(protoResp.PlannedState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.PlannedState = state + + for _, p := range protoResp.RequiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) + } + + resp.PlannedPrivate = protoResp.PlannedPrivate + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + log.Printf("[TRACE] GRPCProvider: ApplyResourceChange") + + resSchema := p.getResourceSchema(r.TypeName) + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ApplyResourceChange_Request{ + TypeName: r.TypeName, + PriorState: 
&proto.DynamicValue{Msgpack: priorMP}, + PlannedState: &proto.DynamicValue{Msgpack: plannedMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + PlannedPrivate: r.PlannedPrivate, + } + + protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + resp.Private = protoResp.Private + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.NewState != nil { + state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.NewState = state + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + log.Printf("[TRACE] GRPCProvider: ImportResourceState") + + protoReq := &proto.ImportResourceState_Request{ + TypeName: r.TypeName, + Id: r.ID, + } + + protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + for _, imported := range protoResp.ImportedResources { + resource := providers.ImportedResource{ + TypeName: imported.TypeName, + Private: imported.Private, + } + + resSchema := p.getResourceSchema(resource.TypeName) + state := cty.NullVal(resSchema.Block.ImpliedType()) + if imported.State != nil { + state, err = msgpack.Unmarshal(imported.State.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resource.State = state + resp.ImportedResources = append(resp.ImportedResources, resource) + } + + return resp +} + +func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + log.Printf("[TRACE] GRPCProvider: ReadDataSource") + + dataSchema := p.getDatasourceSchema(r.TypeName) + + config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadDataSource_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{ + Msgpack: config, + }, + } + + protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(dataSchema.Block.ImpliedType()) + if protoResp.State != nil { + state, err = msgpack.Unmarshal(protoResp.State.Msgpack, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.State = state + + return resp +} + +// closing the grpc connection is final, and terraform will call it at the end of every phase. +func (p *GRPCProvider) Close() error { + log.Printf("[TRACE] GRPCProvider: Close") + + // close the remote listener if we're running within a test + if p.TestListener != nil { + p.TestListener.Close() + } + + // Check this since it's not automatically inserted during plugin creation. 
+	// It's currently only inserted by the command package, because that is
+	// where the factory is built and is the only point with access to the
+	// plugin.Client.
+	if p.PluginClient == nil {
+		log.Println("[DEBUG] provider has no plugin.Client")
+		return nil
+	}
+
+	p.PluginClient.Kill()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go
new file mode 100644
index 0000000000..136c88d681
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go
@@ -0,0 +1,178 @@
+package plugin
+
+import (
+	"context"
+	"errors"
+	"io"
+	"log"
+	"sync"
+
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/terraform/configs/configschema"
+	proto "github.com/hashicorp/terraform/internal/tfplugin5"
+	"github.com/hashicorp/terraform/plugin/convert"
+	"github.com/hashicorp/terraform/provisioners"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/msgpack"
+	"google.golang.org/grpc"
+)
+
+// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation.
+type GRPCProvisionerPlugin struct {
+	plugin.Plugin
+	GRPCProvisioner func() proto.ProvisionerServer
+}
+
+func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
+	return &GRPCProvisioner{
+		client: proto.NewProvisionerClient(c),
+		ctx:    ctx,
+	}, nil
+}
+
+func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
+	proto.RegisterProvisionerServer(s, p.GRPCProvisioner())
+	return nil
+}
+
+// GRPCProvisioner is the grpc implementation of provisioners.Interface.
+type GRPCProvisioner struct {
+	// PluginClient provides a reference to the plugin.Client which controls the plugin process.
+	// This allows the GRPCProvisioner a way to shut down the plugin process.
+	PluginClient *plugin.Client
+
+	client proto.ProvisionerClient
+	ctx    context.Context
+
+	// Cache the schema since we need it for serialization in each method call.
+ mu sync.Mutex + schema *configschema.Block +} + +func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + p.mu.Lock() + defer p.mu.Unlock() + + if p.schema != nil { + return provisioners.GetSchemaResponse{ + Provisioner: p.schema, + } + } + + protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request)) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + if protoResp.Provisioner == nil { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema")) + return resp + } + + resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block) + + p.schema = resp.Provisioner + + return resp +} + +func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + schema := p.GetSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) + return resp + } + + mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateProvisionerConfig_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + } + protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + schema := p.GetSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) + return resp + } + + mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + // connection is always assumed to be a simple string map + connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String)) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ProvisionResource_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + Connection: &proto.DynamicValue{Msgpack: connMP}, + } + + outputClient, err := p.client.ProvisionResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + for { + rcv, err := outputClient.Recv() + if rcv != nil { + r.UIOutput.Output(rcv.Output) + } + if err != nil { + if err != io.EOF { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + break + } + + if len(rcv.Diagnostics) > 0 { + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics)) + break + } + } + + return resp +} + +func (p *GRPCProvisioner) Stop() error { + protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{}) + if err != nil { + return err + } + if protoResp.Error != "" { + return errors.New(protoResp.Error) + } + return nil +} + +func (p *GRPCProvisioner) Close() error { + // check this since it's not automatically inserted during plugin creation + if p.PluginClient == nil { + log.Println("[DEBUG] provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go 
b/vendor/github.com/hashicorp/terraform/plugin/plugin.go index 00fa7b2967..e4fb577619 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/plugin.go +++ b/vendor/github.com/hashicorp/terraform/plugin/plugin.go @@ -6,8 +6,9 @@ import ( // See serve.go for serving plugins -// PluginMap should be used by clients for the map of plugins. -var PluginMap = map[string]plugin.Plugin{ - "provider": &ResourceProviderPlugin{}, - "provisioner": &ResourceProvisionerPlugin{}, +var VersionedPlugins = map[int]plugin.PluginSet{ + 5: { + "provider": &GRPCProviderPlugin{}, + "provisioner": &GRPCProvisionerPlugin{}, + }, } diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go index d6a433c4e5..459661a550 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go +++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go @@ -9,11 +9,14 @@ import ( // ResourceProviderPlugin is the plugin.Plugin implementation. type ResourceProviderPlugin struct { - F func() terraform.ResourceProvider + ResourceProvider func() terraform.ResourceProvider } func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil + return &ResourceProviderServer{ + Broker: b, + Provider: p.ResourceProvider(), + }, nil } func (p *ResourceProviderPlugin) Client( diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go index 8fce9d8ae7..f0cc341f13 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go @@ -4,16 +4,20 @@ import ( "net/rpc" "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/terraform" ) // ResourceProvisionerPlugin is the plugin.Plugin implementation. type ResourceProvisionerPlugin struct { - F func() terraform.ResourceProvisioner + ResourceProvisioner func() terraform.ResourceProvisioner } func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil + return &ResourceProvisionerServer{ + Broker: b, + Provisioner: p.ResourceProvisioner(), + }, nil } func (p *ResourceProvisionerPlugin) Client( @@ -28,6 +32,11 @@ type ResourceProvisioner struct { Client *rpc.Client } +func (p *ResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { + panic("not implemented") + return nil, nil +} + func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) { var resp ResourceProvisionerValidateResponse args := ResourceProvisionerValidateArgs{ diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go index 2028a613ff..8d056c591a 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/serve.go +++ b/vendor/github.com/hashicorp/terraform/plugin/serve.go @@ -2,14 +2,23 @@ package plugin import ( "github.com/hashicorp/go-plugin" + grpcplugin "github.com/hashicorp/terraform/helper/plugin" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/terraform" ) -// The constants below are the names of the plugins that can be dispensed -// from the plugin server. 
const (
+	// The constants below are the names of the plugins that can be dispensed
+	// from the plugin server.
 	ProviderPluginName    = "provider"
 	ProvisionerPluginName = "provisioner"
+
+	// DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify
+	// a particular version during their handshake. This is the version used when Terraform 0.10
+	// and 0.11 launch plugins that were built with support for both versions 4 and 5, and must
+	// stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and
+	// 0.11.
+	DefaultProtocolVersion = 4
 )
 
 // Handshake is the HandshakeConfig used to configure clients and servers.
@@ -19,7 +28,7 @@ var Handshake = plugin.HandshakeConfig{
 	// one or the other that makes it so that they can't safely communicate.
 	// This could be adding a new interface value, it could be how
 	// helper/schema computes diffs, etc.
-	ProtocolVersion: 4,
+	ProtocolVersion: DefaultProtocolVersion,
 
 	// The magic cookie values should NEVER be changed.
 	MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE",
@@ -28,27 +37,85 @@ var Handshake = plugin.HandshakeConfig{
 
 type ProviderFunc func() terraform.ResourceProvider
 type ProvisionerFunc func() terraform.ResourceProvisioner
+type GRPCProviderFunc func() proto.ProviderServer
+type GRPCProvisionerFunc func() proto.ProvisionerServer
 
 // ServeOpts are the configurations to serve a plugin.
 type ServeOpts struct {
 	ProviderFunc    ProviderFunc
 	ProvisionerFunc ProvisionerFunc
+
+	// Wrapped versions of the above plugins will automatically be shimmed and
+	// added to the GRPC functions when possible.
+	GRPCProviderFunc    GRPCProviderFunc
+	GRPCProvisionerFunc GRPCProvisionerFunc
 }
 
 // Serve serves a plugin. This function never returns and should be the final
 // function called in the main function of the plugin.
 func Serve(opts *ServeOpts) {
+	// since the plugins may not yet be aware of the new protocol, we
+	// automatically wrap the plugins in the grpc shims.
+	if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil {
+		provider := grpcplugin.NewGRPCProviderServerShim(opts.ProviderFunc())
+		// this is almost always going to be a *schema.Provider, but check that
+		// we got back a valid provider just in case.
+		if provider != nil {
+			opts.GRPCProviderFunc = func() proto.ProviderServer {
+				return provider
+			}
+		}
+	}
+	if opts.GRPCProvisionerFunc == nil && opts.ProvisionerFunc != nil {
+		provisioner := grpcplugin.NewGRPCProvisionerServerShim(opts.ProvisionerFunc())
+		if provisioner != nil {
+			opts.GRPCProvisionerFunc = func() proto.ProvisionerServer {
+				return provisioner
+			}
+		}
+	}
+
 	plugin.Serve(&plugin.ServeConfig{
-		HandshakeConfig: Handshake,
-		Plugins:         pluginMap(opts),
+		HandshakeConfig:  Handshake,
+		VersionedPlugins: pluginSet(opts),
+		GRPCServer:       plugin.DefaultGRPCServer,
 	})
 }
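
For plugin authors the entry point is unchanged: main keeps passing the legacy ProviderFunc, and Serve shims it onto protocol 5 when possible. A minimal sketch (Provider here stands in for the plugin's own provider constructor, which is not part of this patch):

package main

import (
    "github.com/hashicorp/terraform/helper/schema"
    "github.com/hashicorp/terraform/plugin"
    "github.com/hashicorp/terraform/terraform"
)

// Provider is a stand-in for the plugin's own *schema.Provider constructor.
func Provider() terraform.ResourceProvider {
    return &schema.Provider{}
}

func main() {
    // Serve wraps the legacy provider in the gRPC shim automatically,
    // so the same binary serves protocol versions 4 and 5.
    plugin.Serve(&plugin.ServeOpts{
        ProviderFunc: Provider,
    })
}
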
-// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin
-// server or client.
-func pluginMap(opts *ServeOpts) map[string]plugin.Plugin {
+// pluginMap returns the legacy map[string]plugin.Plugin to use for configuring
+// a plugin server or client.
+func legacyPluginMap(opts *ServeOpts) map[string]plugin.Plugin {
 	return map[string]plugin.Plugin{
-		"provider":    &ResourceProviderPlugin{F: opts.ProviderFunc},
-		"provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc},
+		"provider": &ResourceProviderPlugin{
+			ResourceProvider: opts.ProviderFunc,
+		},
+		"provisioner": &ResourceProvisionerPlugin{
+			ResourceProvisioner: opts.ProvisionerFunc,
+		},
+	}
+}
+
+func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet {
+	// Set the legacy netrpc plugins at version 4.
+	// The oldest version is returned when executed by a legacy go-plugin
+	// client.
+	plugins := map[int]plugin.PluginSet{
+		4: legacyPluginMap(opts),
+	}
+
+	// add the new protocol versions if they're configured
+	if opts.GRPCProviderFunc != nil || opts.GRPCProvisionerFunc != nil {
+		plugins[5] = plugin.PluginSet{}
+		if opts.GRPCProviderFunc != nil {
+			plugins[5]["provider"] = &GRPCProviderPlugin{
+				GRPCProvider: opts.GRPCProviderFunc,
+			}
+		}
+		if opts.GRPCProvisionerFunc != nil {
+			plugins[5]["provisioner"] = &GRPCProvisionerPlugin{
+				GRPCProvisioner: opts.GRPCProvisionerFunc,
+			}
+		}
 	}
+	return plugins
 }
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
index 493efc0a91..3469e6a96b 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
@@ -1,19 +1,20 @@
 package plugin
 
 import (
+	"context"
 	"net/rpc"
 
 	"github.com/hashicorp/go-plugin"
 	"github.com/hashicorp/terraform/terraform"
 )
 
-// UIInput is an implementatin of terraform.UIInput that communicates
+// UIInput is an implementation of terraform.UIInput that communicates
 // over RPC.
 type UIInput struct {
 	Client *rpc.Client
 }
 
-func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) {
+func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) {
 	var resp UIInputInputResponse
 	err := i.Client.Call("Plugin.Input", opts, &resp)
 	if err != nil {
@@ -41,7 +42,7 @@ type UIInputServer struct {
 func (s *UIInputServer) Input(
 	opts *terraform.InputOpts,
 	reply *UIInputInputResponse) error {
-	value, err := s.UIInput.Input(opts)
+	value, err := s.UIInput.Input(context.Background(), opts)
 	*reply = UIInputInputResponse{
 		Value: value,
 		Error: plugin.NewBasicError(err),
diff --git a/vendor/github.com/hashicorp/terraform/providers/addressed_types.go b/vendor/github.com/hashicorp/terraform/providers/addressed_types.go
new file mode 100644
index 0000000000..7ed523f158
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/addressed_types.go
@@ -0,0 +1,47 @@
+package providers
+
+import (
+	"sort"
+
+	"github.com/hashicorp/terraform/addrs"
+)
+
+// AddressedTypes is a helper that extracts all of the distinct provider
+// types from the given list of relative provider configuration addresses.
+func AddressedTypes(providerAddrs []addrs.ProviderConfig) []string {
+	if len(providerAddrs) == 0 {
+		return nil
+	}
+	m := map[string]struct{}{}
+	for _, addr := range providerAddrs {
+		m[addr.Type] = struct{}{}
+	}
+
+	names := make([]string, 0, len(m))
+	for typeName := range m {
+		names = append(names, typeName)
+	}
+
+	sort.Strings(names) // Stable result for tests
+	return names
+}
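
Both helpers collapse a configuration list to its distinct, sorted type names; aliased configurations do not add new types. A small illustration (a sketch assuming the Type and Alias string fields on addrs.ProviderConfig in this vendor drop):

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/addrs"
    "github.com/hashicorp/terraform/providers"
)

func main() {
    cfgs := []addrs.ProviderConfig{
        {Type: "aws"},
        {Type: "aws", Alias: "west"}, // an alias doesn't add a new type
        {Type: "null"},
    }
    fmt.Println(providers.AddressedTypes(cfgs)) // [aws null]
}
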
+
+// AddressedTypesAbs is a helper that extracts all of the distinct provider
+// types from the given list of absolute provider configuration addresses.
+func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []string {
+	if len(providerAddrs) == 0 {
+		return nil
+	}
+	m := map[string]struct{}{}
+	for _, addr := range providerAddrs {
+		m[addr.ProviderConfig.Type] = struct{}{}
+	}
+
+	names := make([]string, 0, len(m))
+	for typeName := range m {
+		names = append(names, typeName)
+	}
+
+	sort.Strings(names) // Stable result for tests
+	return names
+}
diff --git a/vendor/github.com/hashicorp/terraform/providers/doc.go b/vendor/github.com/hashicorp/terraform/providers/doc.go
new file mode 100644
index 0000000000..39aa1de60f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/doc.go
@@ -0,0 +1,3 @@
+// Package providers contains the interface and primary types required to
+// implement a Terraform resource provider.
+package providers
diff --git a/vendor/github.com/hashicorp/terraform/providers/provider.go b/vendor/github.com/hashicorp/terraform/providers/provider.go
new file mode 100644
index 0000000000..1aa08c271d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/provider.go
@@ -0,0 +1,351 @@
+package providers
+
+import (
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// Interface represents the set of methods required for a complete resource
+// provider plugin.
+type Interface interface {
+	// GetSchema returns the complete schema for the provider.
+	GetSchema() GetSchemaResponse
+
+	// PrepareProviderConfig allows the provider to validate the configuration
+	// values, and set or override any values with defaults.
+	PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse
+
+	// ValidateResourceTypeConfig allows the provider to validate the resource
+	// configuration values.
+	ValidateResourceTypeConfig(ValidateResourceTypeConfigRequest) ValidateResourceTypeConfigResponse
+
+	// ValidateDataSourceConfig allows the provider to validate the data source
+	// configuration values.
+	ValidateDataSourceConfig(ValidateDataSourceConfigRequest) ValidateDataSourceConfigResponse
+
+	// UpgradeResourceState is called when the state loader encounters an
+	// instance state whose schema version is less than the one reported by the
+	// currently-used version of the corresponding provider, and the upgraded
+	// result is used for any further processing.
+	UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse
+
+	// Configure configures and initializes the provider.
+	Configure(ConfigureRequest) ConfigureResponse
+
+	// Stop is called when the provider should halt any in-flight actions.
+	//
+	// Stop should not block waiting for in-flight actions to complete. It
+	// should take any action it wants and return immediately acknowledging it
+	// has received the stop request. Terraform will not make any further API
+	// calls to the provider after Stop is called.
+	//
+	// The error returned, if non-nil, is assumed to mean that signaling the
+	// stop somehow failed and that the user should expect potentially waiting
+	// a longer period of time.
+	Stop() error
+
+	// ReadResource refreshes a resource and returns its current state.
+	ReadResource(ReadResourceRequest) ReadResourceResponse
+
+	// PlanResourceChange takes the current state and proposed state of a
+	// resource, and returns the planned final state.
+	PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse
+
+	// ApplyResourceChange takes the planned state for a resource, which may
+	// yet contain unknown computed values, and applies the changes returning
+	// the final state.
+	ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse
+
+	// ImportResourceState requests that the given resource be imported.
+	ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse
+
+	// ReadDataSource returns the data source's current state.
+	ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse
+
+	// Close shuts down the plugin process if applicable.
+	Close() error
+}
+
+type GetSchemaResponse struct {
+	// Provider is the schema for the provider itself.
+	Provider Schema
+
+	// ResourceTypes map the resource type name to that type's schema.
+	ResourceTypes map[string]Schema
+
+	// DataSources maps the data source name to that data source's schema.
+	DataSources map[string]Schema
+
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+}
+
+// Schema pairs a provider or resource schema with that schema's version.
+// This is used to be able to upgrade the schema in UpgradeResourceState.
+type Schema struct {
+	Version int64
+	Block   *configschema.Block
+}
+
+type PrepareProviderConfigRequest struct {
+	// Config is the raw configuration value for the provider.
+	Config cty.Value
+}
+
+type PrepareProviderConfigResponse struct {
+	// PreparedConfig is the configuration as prepared by the provider.
+	PreparedConfig cty.Value
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+}
+
+type ValidateResourceTypeConfigRequest struct {
+	// TypeName is the name of the resource type to validate.
+	TypeName string
+
+	// Config is the configuration value to validate, which may contain unknown
+	// values.
+	Config cty.Value
+}
+
+type ValidateResourceTypeConfigResponse struct {
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+}
+
+type ValidateDataSourceConfigRequest struct {
+	// TypeName is the name of the data source type to validate.
+	TypeName string
+
+	// Config is the configuration value to validate, which may contain unknown
+	// values.
+	Config cty.Value
+}
+
+type ValidateDataSourceConfigResponse struct {
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+}
+
+type UpgradeResourceStateRequest struct {
+	// TypeName is the name of the resource type being upgraded.
+	TypeName string
+
+	// Version is the version of the schema that created the current state.
+	Version int64
+
+	// RawStateJSON and RawStateFlatmap contain the state that needs to be
+	// upgraded to match the current schema version. Because the schema is
+	// unknown, this contains only the raw data as stored in the state.
+	// RawStateJSON is the current json state encoding.
+	// RawStateFlatmap is the legacy flatmap encoding.
+	// Only one of these fields may be set for the upgrade request.
+	RawStateJSON    []byte
+	RawStateFlatmap map[string]string
+}
+
+type UpgradeResourceStateResponse struct {
+	// UpgradedState is the newly upgraded resource state.
+	UpgradedState cty.Value
+
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+}
+
+type ConfigureRequest struct {
+	// TerraformVersion is the version string from the running instance of
+	// terraform. Providers can use TerraformVersion to verify compatibility,
+	// and to store for informational purposes.
+	TerraformVersion string
+
+	// Config is the complete configuration value for the provider.
+	Config cty.Value
+}
+
+type ConfigureResponse struct {
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+}
+
+type ReadResourceRequest struct {
+	// TypeName is the name of the resource type being read.
+	TypeName string
+
+	// PriorState contains the previously saved state value for this resource.
+	PriorState cty.Value
+}
+
+type ReadResourceResponse struct {
+	// NewState contains the current state of the resource.
+	NewState cty.Value
+
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+}
+
+type PlanResourceChangeRequest struct {
+	// TypeName is the name of the resource type to plan.
+	TypeName string
+
+	// PriorState is the previously saved state value for this resource.
+	PriorState cty.Value
+
+	// ProposedNewState is the expected state after the new configuration is
+	// applied. This is created by directly applying the configuration to the
+	// PriorState. The provider is then responsible for applying any further
+	// changes required to create the proposed final state.
+	ProposedNewState cty.Value
+
+	// Config is the resource configuration, before being merged with the
+	// PriorState. Any value not explicitly set in the configuration will be
+	// null. Config is supplied for reference, but Provider implementations
+	// should prefer the ProposedNewState in most circumstances.
+	Config cty.Value
+
+	// PriorPrivate is the previously saved private data returned from the
+	// provider during the last apply.
+	PriorPrivate []byte
+}
+
+type PlanResourceChangeResponse struct {
+	// PlannedState is the expected state of the resource once the current
+	// configuration is applied.
+	PlannedState cty.Value
+
+	// RequiresReplace is the list of the attributes that require
+	// resource replacement.
+	RequiresReplace []cty.Path
+
+	// PlannedPrivate is an opaque blob that is not interpreted by terraform
+	// core. This will be saved and relayed back to the provider during
+	// ApplyResourceChange.
+	PlannedPrivate []byte
+
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+
+	// LegacyTypeSystem is set only if the provider is using the legacy SDK
+	// whose type system cannot be precisely mapped into the Terraform type
+	// system. We use this to bypass certain consistency checks that would
+	// otherwise fail due to this imprecise mapping. No other provider or SDK
+	// implementation is permitted to set this.
+	LegacyTypeSystem bool
+}
+
+type ApplyResourceChangeRequest struct {
+	// TypeName is the name of the resource type being applied.
+	TypeName string
+
+	// PriorState is the current state of the resource.
+	PriorState cty.Value
+
+	// PlannedState is the state returned from PlanResourceChange, and should
+	// represent the new state, minus any remaining computed attributes.
+	PlannedState cty.Value
+
+	// Config is the resource configuration, before being merged with the
+	// PriorState. Any value not explicitly set in the configuration will be
+	// null. Config is supplied for reference, but Provider implementations
+	// should prefer the PlannedState in most circumstances.
+	Config cty.Value
+
+	// PlannedPrivate is the same value as returned by PlanResourceChange.
+ PlannedPrivate []byte +} + +type ApplyResourceChangeResponse struct { + // NewState is the new complete state after applying the planned change. + // In the event of an error, NewState should represent the most recent + // known state of the resource, if it exists. + NewState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ImportResourceStateRequest struct { + // TypeName is the name of the resource type to be imported. + TypeName string + + // ID is a string with which the provider can identify the resource to be + // imported. + ID string +} + +type ImportResourceStateResponse struct { + // ImportedResources contains one or more state values related to the + // imported resource. It is not required that these be complete, only that + // there is enough identifying information for the provider to successfully + // update the states in ReadResource. + ImportedResources []ImportedResource + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// ImportedResource represents an object being imported into Terraform with the +// help of a provider. An ImportedObject is a RemoteObject that has been read +// by the provider's import handler but hasn't yet been committed to state. +type ImportedResource struct { + // TypeName is the name of the resource type associated with the + // returned state. It's possible for providers to import multiple related + // types with a single import request. + TypeName string + + // State is the state of the remote object being imported. This may not be + // complete, but must contain enough information to uniquely identify the + // resource. + State cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +// AsInstanceObject converts the receiving ImportedObject into a +// ResourceInstanceObject that has status ObjectReady. +// +// The returned object does not know its own resource type, so the caller must +// retain the ResourceType value from the source object if this information is +// needed. +// +// The returned object also has no dependency addresses, but the caller may +// freely modify the direct fields of the returned object without affecting +// the receiver. +func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { + return &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: ir.State, + Private: ir.Private, + } +} + +type ReadDataSourceRequest struct { + // TypeName is the name of the data source type to Read. + TypeName string + + // Config is the complete configuration for the requested data source. + Config cty.Value +} + +type ReadDataSourceResponse struct { + // State is the current state of the requested data source. + State cty.Value + + // Diagnostics contains any warnings or errors from the method call. 
+ Diagnostics tfdiags.Diagnostics +} diff --git a/vendor/github.com/hashicorp/terraform/providers/resolver.go b/vendor/github.com/hashicorp/terraform/providers/resolver.go new file mode 100644 index 0000000000..4de8e0acd8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/providers/resolver.go @@ -0,0 +1,112 @@ +package providers + +import ( + "fmt" + + "github.com/hashicorp/terraform/plugin/discovery" +) + +// Resolver is an interface implemented by objects that are able to resolve +// a given set of resource provider version constraints into Factory +// callbacks. +type Resolver interface { + // Given a constraint map, return a Factory for each requested provider. + // If some or all of the constraints cannot be satisfied, return a non-nil + // slice of errors describing the problems. + ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) +} + +// ResolverFunc wraps a callback function and turns it into a Resolver +// implementation, for convenience in situations where a function and its +// associated closure are sufficient as a resolver implementation. +type ResolverFunc func(reqd discovery.PluginRequirements) (map[string]Factory, []error) + +// ResolveProviders implements Resolver by calling the +// wrapped function. +func (f ResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) { + return f(reqd) +} + +// ResolverFixed returns a Resolver that has a fixed set of provider factories +// provided by the caller. The returned resolver ignores version constraints +// entirely and just returns the given factory for each requested provider +// name. +// +// This function is primarily used in tests, to provide mock providers or +// in-process providers under test. +func ResolverFixed(factories map[string]Factory) Resolver { + return ResolverFunc(func(reqd discovery.PluginRequirements) (map[string]Factory, []error) { + ret := make(map[string]Factory, len(reqd)) + var errs []error + for name := range reqd { + if factory, exists := factories[name]; exists { + ret[name] = factory + } else { + errs = append(errs, fmt.Errorf("provider %q is not available", name)) + } + } + return ret, errs + }) +} + +// Factory is a function type that creates a new instance of a resource +// provider, or returns an error if that is impossible. +type Factory func() (Interface, error) + +// FactoryFixed is a helper that creates a Factory that just returns some given +// single provider. +// +// Unlike usual factories, the exact same instance is returned for each call +// to the factory and so this must be used in only specialized situations where +// the caller can take care to either not mutate the given provider at all +// or to mutate it in ways that will not cause unexpected behavior for others +// holding the same reference. +func FactoryFixed(p Interface) Factory { + return func() (Interface, error) { + return p, nil + } +} + +// ProviderHasResource is a helper that requests schema from the given provider +// and checks if it has a resource type of the given name. +// +// This function is more expensive than it may first appear since it must +// retrieve the entire schema from the underlying provider, and so it should +// be used sparingly and especially not in tight loops. +// +// Since retrieving the provider may fail (e.g. 
if the provider is accessed +// over an RPC channel that has operational problems), this function will +// return false if the schema cannot be retrieved, under the assumption that +// a subsequent call to do anything with the resource type would fail +// anyway. +func ProviderHasResource(provider Interface, typeName string) bool { + resp := provider.GetSchema() + if resp.Diagnostics.HasErrors() { + return false + } + + _, exists := resp.ResourceTypes[typeName] + return exists +} + +// ProviderHasDataSource is a helper that requests schema from the given +// provider and checks if it has a data source of the given name. +// +// This function is more expensive than it may first appear since it must +// retrieve the entire schema from the underlying provider, and so it should +// be used sparingly and especially not in tight loops. +// +// Since retrieving the provider may fail (e.g. if the provider is accessed +// over an RPC channel that has operational problems), this function will +// return false if the schema cannot be retrieved, under the assumption that +// a subsequent call to do anything with the data source would fail +// anyway. +func ProviderHasDataSource(provider Interface, dataSourceName string) bool { + resp := provider.GetSchema() + if resp.Diagnostics.HasErrors() { + return false + } + + _, exists := resp.DataSources[dataSourceName] + return exists +} diff --git a/vendor/github.com/hashicorp/terraform/provisioners/doc.go b/vendor/github.com/hashicorp/terraform/provisioners/doc.go new file mode 100644 index 0000000000..b03ba9a1bb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/provisioners/doc.go @@ -0,0 +1,3 @@ +// Package provisioners contains the interface and primary types to implement a +// Terraform resource provisioner. +package provisioners diff --git a/vendor/github.com/hashicorp/terraform/provisioners/factory.go b/vendor/github.com/hashicorp/terraform/provisioners/factory.go new file mode 100644 index 0000000000..590b97a84f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/provisioners/factory.go @@ -0,0 +1,19 @@ +package provisioners + +// Factory is a function type that creates a new instance of a resource +// provisioner, or returns an error if that is impossible. +type Factory func() (Interface, error) + +// FactoryFixed is a helper that creates a Factory that just returns some given +// single provisioner. +// +// Unlike usual factories, the exact same instance is returned for each call +// to the factory and so this must be used in only specialized situations where +// the caller can take care to either not mutate the given provider at all +// or to mutate it in ways that will not cause unexpected behavior for others +// holding the same reference. +func FactoryFixed(p Interface) Factory { + return func() (Interface, error) { + return p, nil + } +} diff --git a/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go b/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go new file mode 100644 index 0000000000..e53c88488e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go @@ -0,0 +1,82 @@ +package provisioners + +import ( + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Interface is the set of methods required for a resource provisioner plugin. +type Interface interface { + // GetSchema returns the schema for the provisioner configuration. 
+ GetSchema() GetSchemaResponse + + // ValidateProvisionerConfig allows the provisioner to validate the + // configuration values. + ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse + + // ProvisionResource runs the provisioner with provided configuration. + // ProvisionResource blocks until the execution is complete. + // If the returned diagnostics contain any errors, the resource will be + // left in a tainted state. + ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse + + // Stop is called to interrupt the provisioner. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. Terraform will not make any further API + // calls to the provisioner after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // Close shuts down the plugin process if applicable. + Close() error +} + +type GetSchemaResponse struct { + // Provisioner contains the schema for this provisioner. + Provisioner *configschema.Block + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// UIOutput provides the Output method for resource provisioner +// plugins to write any output to the UI. +// +// Provisioners may call the Output method multiple times while Apply is in +// progress. It is invalid to call Output after Apply returns. +type UIOutput interface { + Output(string) +} + +type ValidateProvisionerConfigRequest struct { + // Config is the complete configuration to be used for the provisioner. + Config cty.Value +} + +type ValidateProvisionerConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ProvisionResourceRequest struct { + // Config is the complete provisioner configuration. + Config cty.Value + + // Connection contains any information required to access the resource + // instance. + Connection cty.Value + + // UIOutput is used to return output during the Apply operation. + UIOutput UIOutput +} + +type ProvisionResourceResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} diff --git a/vendor/github.com/hashicorp/terraform/registry/client.go b/vendor/github.com/hashicorp/terraform/registry/client.go index 8e31a6a3e2..93424d1764 100644 --- a/vendor/github.com/hashicorp/terraform/registry/client.go +++ b/vendor/github.com/hashicorp/terraform/registry/client.go @@ -20,10 +20,11 @@ import ( ) const ( - xTerraformGet = "X-Terraform-Get" - xTerraformVersion = "X-Terraform-Version" - requestTimeout = 10 * time.Second - serviceID = "modules.v1" + xTerraformGet = "X-Terraform-Get" + xTerraformVersion = "X-Terraform-Version" + requestTimeout = 10 * time.Second + modulesServiceID = "modules.v1" + providersServiceID = "providers.v1" ) var tfVersion = version.String() @@ -57,28 +58,28 @@ func NewClient(services *disco.Disco, client *http.Client) *Client { } } -// Discover qeuries the host, and returns the url for the registry. -func (c *Client) Discover(host svchost.Hostname) *url.URL { - service := c.services.DiscoverServiceURL(host, serviceID) - if service == nil { - return nil +// Discover queries the host, and returns the url for the registry. 
+func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) {
+	service, err := c.services.DiscoverServiceURL(host, serviceID)
+	if err != nil {
+		return nil, &ServiceUnreachableError{err}
 	}
 	if !strings.HasSuffix(service.Path, "/") {
 		service.Path += "/"
 	}
-	return service
+	return service, nil
 }
 
-// Versions queries the registry for a module, and returns the available versions.
-func (c *Client) Versions(module *regsrc.Module) (*response.ModuleVersions, error) {
+// ModuleVersions queries the registry for a module, and returns the available versions.
+func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) {
 	host, err := module.SvcHost()
 	if err != nil {
 		return nil, err
 	}
 
-	service := c.Discover(host)
-	if service == nil {
-		return nil, fmt.Errorf("host %s does not provide Terraform modules", host)
+	service, err := c.Discover(host, modulesServiceID)
+	if err != nil {
+		return nil, err
 	}
 
 	p, err := url.Parse(path.Join(module.Module(), "versions"))
@@ -141,17 +142,17 @@ func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) {
 	}
 }
 
-// Location find the download location for a specific version module.
+// ModuleLocation finds the download location for a specific module version.
 // This returns a string, because the final location may contain special go-getter syntax.
-func (c *Client) Location(module *regsrc.Module, version string) (string, error) {
+func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, error) {
 	host, err := module.SvcHost()
 	if err != nil {
 		return "", err
 	}
 
-	service := c.Discover(host)
-	if service == nil {
-		return "", fmt.Errorf("host %s does not provide Terraform modules", host.ForDisplay())
+	service, err := c.Discover(host, modulesServiceID)
+	if err != nil {
+		return "", err
 	}
 
 	var p *url.URL
@@ -225,3 +226,118 @@ func (c *Client) Location(module *regsrc.Module, version string) (string, error)
 	return location, nil
 }
+
+// TerraformProviderVersions queries the registry for a provider, and returns the available versions.
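
Putting the renamed client methods together, a module lookup now reads roughly as below (a sketch; the module source and version strings are illustrative, and "modules.v1" matches the modulesServiceID constant used in this file):

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/registry"
    "github.com/hashicorp/terraform/registry/regsrc"
    "github.com/hashicorp/terraform/svchost/disco"
)

func main() {
    client := registry.NewClient(disco.New(), nil)

    module, err := regsrc.ParseModuleSource("hashicorp/consul/aws")
    if err != nil {
        panic(err)
    }

    // Formerly client.Versions and client.Location.
    versions, err := client.ModuleVersions(module)
    if err != nil {
        panic(err)
    }
    fmt.Println(len(versions.Modules))

    location, err := client.ModuleLocation(module, "0.3.2")
    if err != nil {
        panic(err)
    }
    fmt.Println(location)
}
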
+func (c *Client) TerraformProviderVersions(provider *regsrc.TerraformProvider) (*response.TerraformProviderVersions, error) { + host, err := provider.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host, providersServiceID) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join(provider.TerraformProvider(), "versions")) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching provider versions from %q", service) + + req, err := http.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // OK + case http.StatusNotFound: + return nil, &errProviderNotFound{addr: provider} + default: + return nil, fmt.Errorf("error looking up provider versions: %s", resp.Status) + } + + var versions response.TerraformProviderVersions + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&versions); err != nil { + return nil, err + } + + return &versions, nil +} + +// TerraformProviderLocation queries the registry for a provider download metadata +func (c *Client) TerraformProviderLocation(provider *regsrc.TerraformProvider, version string) (*response.TerraformProviderPlatformLocation, error) { + host, err := provider.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host, providersServiceID) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join( + provider.TerraformProvider(), + version, + "download", + provider.OS, + provider.Arch, + )) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching provider location from %q", service) + + req, err := http.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var loc response.TerraformProviderPlatformLocation + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&loc); err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusNoContent: + // OK + case http.StatusNotFound: + return nil, fmt.Errorf("provider %q version %q not found", provider.TerraformProvider(), version) + default: + // anything else is an error: + return nil, fmt.Errorf("error getting download location for %q: %s", provider.TerraformProvider(), resp.Status) + } + + return &loc, nil +} diff --git a/vendor/github.com/hashicorp/terraform/registry/errors.go b/vendor/github.com/hashicorp/terraform/registry/errors.go index b8dcd31e3b..5a6a31b080 100644 --- a/vendor/github.com/hashicorp/terraform/registry/errors.go +++ b/vendor/github.com/hashicorp/terraform/registry/errors.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/svchost/disco" ) type errModuleNotFound struct { @@ -21,3 +22,42 @@ func IsModuleNotFound(err error) bool { _, ok := err.(*errModuleNotFound) return ok } + +type errProviderNotFound struct { + addr *regsrc.TerraformProvider +} + +func (e *errProviderNotFound) Error() string { + return fmt.Sprintf("provider %s not found", e.addr) +} + +// IsProviderNotFound returns true only if the given error 
is a "provider not found" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. +func IsProviderNotFound(err error) bool { + _, ok := err.(*errProviderNotFound) + return ok +} + +// IsServiceNotProvided returns true only if the given error is a "service not provided" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. +func IsServiceNotProvided(err error) bool { + _, ok := err.(*disco.ErrServiceNotProvided) + return ok +} + +// ServiceUnreachableError Registry service is unreachable +type ServiceUnreachableError struct { + err error +} + +func (e *ServiceUnreachableError) Error() string { + return e.err.Error() +} + +// IsServiceUnreachable returns true if the registry/discovery service was unreachable +func IsServiceUnreachable(err error) bool { + _, ok := err.(*ServiceUnreachableError) + return ok +} diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go new file mode 100644 index 0000000000..58dedee5ed --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go @@ -0,0 +1,60 @@ +package regsrc + +import ( + "fmt" + "runtime" + "strings" + + "github.com/hashicorp/terraform/svchost" +) + +var ( + // DefaultProviderNamespace represents the namespace for canonical + // HashiCorp-controlled providers. + DefaultProviderNamespace = "-" +) + +// TerraformProvider describes a Terraform Registry Provider source. +type TerraformProvider struct { + RawHost *FriendlyHost + RawNamespace string + RawName string + OS string + Arch string +} + +// NewTerraformProvider constructs a new provider source. +func NewTerraformProvider(name, os, arch string) *TerraformProvider { + if os == "" { + os = runtime.GOOS + } + if arch == "" { + arch = runtime.GOARCH + } + + // separate namespace if included + namespace := DefaultProviderNamespace + if names := strings.SplitN(name, "/", 2); len(names) == 2 { + namespace, name = names[0], names[1] + } + p := &TerraformProvider{ + RawHost: PublicRegistryHost, + RawNamespace: namespace, + RawName: name, + OS: os, + Arch: arch, + } + + return p +} + +// Provider returns just the registry ID of the provider +func (p *TerraformProvider) TerraformProvider() string { + return fmt.Sprintf("%s/%s", p.RawNamespace, p.RawName) +} + +// SvcHost returns the svchost.Hostname for this provider. The +// default PublicRegistryHost is returned. +func (p *TerraformProvider) SvcHost() (svchost.Hostname, error) { + return svchost.ForComparison(PublicRegistryHost.Raw) +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/provider.go b/vendor/github.com/hashicorp/terraform/registry/response/provider.go new file mode 100644 index 0000000000..5e8bae3543 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/provider.go @@ -0,0 +1,36 @@ +package response + +import ( + "time" +) + +// Provider is the response structure with the data for a single provider +// version. This is just the metadata. A full provider response will be +// ProviderDetail. +type Provider struct { + ID string `json:"id"` + + //--------------------------------------------------------------- + // Metadata about the overall provider. 
+ + Owner string `json:"owner"` + Namespace string `json:"namespace"` + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + Source string `json:"source"` + PublishedAt time.Time `json:"published_at"` + Downloads int `json:"downloads"` +} + +// ProviderDetail represents a Provider with full detail. +type ProviderDetail struct { + Provider + + //--------------------------------------------------------------- + // The fields below are only set when requesting this specific + // module. They are available to easily know all available versions + // without multiple API calls. + + Versions []string `json:"versions"` // All versions +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go b/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go new file mode 100644 index 0000000000..1dc7d237fd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go @@ -0,0 +1,7 @@ +package response + +// ProviderList is the response structure for a pageable list of providers. +type ProviderList struct { + Meta PaginationMeta `json:"meta"` + Providers []*Provider `json:"providers"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go b/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go new file mode 100644 index 0000000000..64e454a6c4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go @@ -0,0 +1,96 @@ +package response + +import ( + "sort" + "strings" + + version "github.com/hashicorp/go-version" +) + +// TerraformProvider is the response structure for all required information for +// Terraform to choose a download URL. It must include all versions and all +// platforms for Terraform to perform version and os/arch constraint matching +// locally. +type TerraformProvider struct { + ID string `json:"id"` + Verified bool `json:"verified"` + + Versions []*TerraformProviderVersion `json:"versions"` +} + +// TerraformProviderVersion is the Terraform-specific response structure for a +// provider version. +type TerraformProviderVersion struct { + Version string `json:"version"` + Protocols []string `json:"protocols"` + + Platforms []*TerraformProviderPlatform `json:"platforms"` +} + +// TerraformProviderVersions is the Terraform-specific response structure for an +// array of provider versions +type TerraformProviderVersions struct { + ID string `json:"id"` + Versions []*TerraformProviderVersion `json:"versions"` + Warnings []string `json:"warnings"` +} + +// TerraformProviderPlatform is the Terraform-specific response structure for a +// provider platform. +type TerraformProviderPlatform struct { + OS string `json:"os"` + Arch string `json:"arch"` +} + +// TerraformProviderPlatformLocation is the Terraform-specific response +// structure for a provider platform with all details required to perform a +// download. +type TerraformProviderPlatformLocation struct { + Protocols []string `json:"protocols"` + OS string `json:"os"` + Arch string `json:"arch"` + Filename string `json:"filename"` + DownloadURL string `json:"download_url"` + ShasumsURL string `json:"shasums_url"` + ShasumsSignatureURL string `json:"shasums_signature_url"` + Shasum string `json:"shasum"` + + SigningKeys SigningKeyList `json:"signing_keys"` +} + +// SigningKeyList is the response structure for a list of signing keys. 
+type SigningKeyList struct { + GPGKeys []*GPGKey `json:"gpg_public_keys"` +} + +// GPGKey is the response structure for a GPG key. +type GPGKey struct { + ASCIIArmor string `json:"ascii_armor"` + Source string `json:"source"` + SourceURL *string `json:"source_url"` +} + +// Collection type for TerraformProviderVersion +type ProviderVersionCollection []*TerraformProviderVersion + +// GPGASCIIArmor returns an ASCII-armor-formatted string for all of the gpg +// keys in the response. +func (signingKeys *SigningKeyList) GPGASCIIArmor() string { + keys := []string{} + + for _, gpgKey := range signingKeys.GPGKeys { + keys = append(keys, gpgKey.ASCIIArmor) + } + + return strings.Join(keys, "\n") +} + +// Sort sorts versions from newest to oldest. +func (v ProviderVersionCollection) Sort() { + sort.Slice(v, func(i, j int) bool { + versionA, _ := version.NewVersion(v[i].Version) + versionB, _ := version.NewVersion(v[j].Version) + + return versionA.GreaterThan(versionB) + }) +} diff --git a/vendor/github.com/hashicorp/terraform/states/doc.go b/vendor/github.com/hashicorp/terraform/states/doc.go new file mode 100644 index 0000000000..7dd74ac785 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/doc.go @@ -0,0 +1,3 @@ +// Package states contains the types that are used to represent Terraform +// states. +package states diff --git a/vendor/github.com/hashicorp/terraform/states/eachmode_string.go b/vendor/github.com/hashicorp/terraform/states/eachmode_string.go new file mode 100644 index 0000000000..6de61802fb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/eachmode_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type EachMode"; DO NOT EDIT. + +package states + +import "strconv" + +const ( + _EachMode_name_0 = "NoEach" + _EachMode_name_1 = "EachListEachMap" +) + +var ( + _EachMode_index_1 = [...]uint8{0, 8, 15} +) + +func (i EachMode) String() string { + switch { + case i == 0: + return _EachMode_name_0 + case 76 <= i && i <= 77: + i -= 76 + return _EachMode_name_1[_EachMode_index_1[i]:_EachMode_index_1[i+1]] + default: + return "EachMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/states/instance_generation.go b/vendor/github.com/hashicorp/terraform/states/instance_generation.go new file mode 100644 index 0000000000..617ad4ea6d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/instance_generation.go @@ -0,0 +1,24 @@ +package states + +// Generation is used to represent multiple objects in a succession of objects +// represented by a single resource instance address. A resource instance can +// have multiple generations over its lifetime due to object replacement +// (when a change can't be applied without destroying and re-creating), and +// multiple generations can exist at the same time when create_before_destroy +// is used. +// +// A Generation value can either be the value of the variable "CurrentGen" or +// a value of type DeposedKey. Generation values can be compared for equality +// using "==" and used as map keys. The zero value of Generation (nil) is not +// a valid generation and must not be used. +type Generation interface { + generation() +} + +// CurrentGen is the Generation representing the currently-active object for +// a resource instance. 
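
In practice callers compare a Generation against CurrentGen by equality and type-assert DeposedKey (a string type declared elsewhere in this package). A hypothetical helper makes the intended usage concrete:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/states"
)

// describeGeneration is a hypothetical helper showing the intended
// comparisons: CurrentGen by equality, DeposedKey by type assertion.
func describeGeneration(gen states.Generation) string {
    if gen == states.CurrentGen {
        return "current"
    }
    if dk, ok := gen.(states.DeposedKey); ok {
        return fmt.Sprintf("deposed (%s)", dk)
    }
    return "invalid"
}

func main() {
    fmt.Println(describeGeneration(states.CurrentGen))
}
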
+var CurrentGen Generation
+
+type currentGen struct{}
+
+func (g currentGen) generation() {}
diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object.go b/vendor/github.com/hashicorp/terraform/states/instance_object.go
new file mode 100644
index 0000000000..1374c59d38
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/instance_object.go
@@ -0,0 +1,120 @@
+package states
+
+import (
+	"github.com/zclconf/go-cty/cty"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+
+	"github.com/hashicorp/terraform/addrs"
+)
+
+// ResourceInstanceObject is the local representation of a specific remote
+// object associated with a resource instance. In practice not all remote
+// objects are actually remote in the sense of being accessed over the network,
+// but this is the most common case.
+//
+// It is not valid to mutate a ResourceInstanceObject once it has been created.
+// Instead, create a new object and replace the existing one.
+type ResourceInstanceObject struct {
+	// Value is the object-typed value representing the remote object within
+	// Terraform.
+	Value cty.Value
+
+	// Private is an opaque value set by the provider when this object was
+	// last created or updated. Terraform Core does not use this value in
+	// any way and it is not exposed anywhere in the user interface, so
+	// a provider can use it for retaining any necessary private state.
+	Private []byte
+
+	// Status represents the "readiness" of the object as of the last time
+	// it was updated.
+	Status ObjectStatus
+
+	// Dependencies is a set of other addresses in the same module which
+	// this instance depended on when the given attributes were evaluated.
+	// This is used to construct the dependency relationships for an object
+	// whose configuration is no longer available, such as if it has been
+	// removed from configuration altogether, or is now deposed.
+	Dependencies []addrs.Referenceable
+}
+
+// ObjectStatus represents the status of a RemoteObject.
+type ObjectStatus rune
+
+//go:generate stringer -type ObjectStatus
+
+const (
+	// ObjectReady is an object status for an object that is ready to use.
+	ObjectReady ObjectStatus = 'R'
+
+	// ObjectTainted is an object status representing an object that is in
+	// an unrecoverable bad state due to a partial failure during a create,
+	// update, or delete operation. Since it cannot be moved into the
+	// ObjectReady state, a tainted object must be replaced.
+	ObjectTainted ObjectStatus = 'T'
+
+	// ObjectPlanned is a special object status used only for the transient
+	// placeholder objects we place into state during the refresh and plan
+	// walks to stand in for objects that will be created during apply.
+	//
+	// Any object of this status must have a corresponding change recorded
+	// in the current plan, whose value must then be used in preference to
+	// the value stored in state when evaluating expressions. A planned
+	// object stored in state will be incomplete if any of its attributes are
+	// not yet known, and the plan must be consulted in order to "see" those
+	// unknown values, because the state is not able to represent them.
+	ObjectPlanned ObjectStatus = 'P'
+)
+
+// Encode marshals the value within the receiver to produce a
+// ResourceInstanceObjectSrc ready to be written to a state file.
+//
+// The given type must be the implied type of the resource type schema, and
+// the given value must conform to it. It is important to pass the schema
It is important to pass the schema +// type and not the object's own type so that dynamically-typed attributes +// will be stored correctly. The caller must also provide the version number +// of the schema that the given type was derived from, which will be recorded +// in the source object so it can be used to detect when schema migration is +// required on read. +// +// The returned object may share internal references with the receiver and +// so the caller must not mutate the receiver any further once once this +// method is called. +func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) { + // Our state serialization can't represent unknown values, so we convert + // them to nulls here. This is lossy, but nobody should be writing unknown + // values here and expecting to get them out again later. + // + // We get unknown values here while we're building out a "planned state" + // during the plan phase, but the value stored in the plan takes precedence + // for expression evaluation. The apply step should never produce unknown + // values, but if it does it's the responsibility of the caller to detect + // and raise an error about that. + val := cty.UnknownAsNull(o.Value) + + src, err := ctyjson.Marshal(val, ty) + if err != nil { + return nil, err + } + + return &ResourceInstanceObjectSrc{ + SchemaVersion: schemaVersion, + AttrsJSON: src, + Private: o.Private, + Status: o.Status, + Dependencies: o.Dependencies, + }, nil +} + +// AsTainted returns a deep copy of the receiver with the status updated to +// ObjectTainted. +func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject { + if o == nil { + // A nil object can't be tainted, but we'll allow this anyway to + // avoid a crash, since we presumably intend to eventually record + // the object has having been deleted anyway. + return nil + } + ret := o.DeepCopy() + ret.Status = ObjectTainted + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object_src.go b/vendor/github.com/hashicorp/terraform/states/instance_object_src.go new file mode 100644 index 0000000000..6cb3c27e2a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/instance_object_src.go @@ -0,0 +1,113 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config/hcl2shim" +) + +// ResourceInstanceObjectSrc is a not-fully-decoded version of +// ResourceInstanceObject. Decoding of it can be completed by first handling +// any schema migration steps to get to the latest schema version and then +// calling method Decode with the implied type of the latest schema. +type ResourceInstanceObjectSrc struct { + // SchemaVersion is the resource-type-specific schema version number that + // was current when either AttrsJSON or AttrsFlat was encoded. Migration + // steps are required if this is less than the current version number + // reported by the corresponding provider. + SchemaVersion uint64 + + // AttrsJSON is a JSON-encoded representation of the object attributes, + // encoding the value (of the object type implied by the associated resource + // type schema) that represents this remote object in Terraform Language + // expressions, and is compared with configuration when producing a diff. 
+ // + // This is retained in JSON format here because it may require preprocessing + // before decoding if, for example, the stored attributes are for an older + // schema version which the provider must upgrade before use. If the + // version is current, it is valid to simply decode this using the + // type implied by the current schema, without the need for the provider + // to perform an upgrade first. + // + // When writing a ResourceInstanceObject into the state, AttrsJSON should + // always be conformant to the current schema version and the current + // schema version should be recorded in the SchemaVersion field. + AttrsJSON []byte + + // AttrsFlat is a legacy form of attributes used in older state file + // formats, and in the new state format for objects that haven't yet been + // upgraded. This attribute is mutually exclusive with Attrs: for any + // ResourceInstanceObject, only one of these attributes may be populated + // and the other must be nil. + // + // An instance object with this field populated should be upgraded to use + // Attrs at the earliest opportunity, since this legacy flatmap-based + // format will be phased out over time. AttrsFlat should not be used when + // writing new or updated objects to state; instead, callers must follow + // the recommendations in the AttrsJSON documentation above. + AttrsFlat map[string]string + + // These fields all correspond to the fields of the same name on + // ResourceInstanceObject. + Private []byte + Status ObjectStatus + Dependencies []addrs.Referenceable +} + +// Decode unmarshals the raw representation of the object attributes. Pass the +// implied type of the corresponding resource type schema for correct operation. +// +// Before calling Decode, the caller must check that the SchemaVersion field +// exactly equals the version number of the schema whose implied type is being +// passed, or else the result is undefined. +// +// The returned object may share internal references with the receiver and +// so the caller must not mutate the receiver any further once once this +// method is called. +func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) { + var val cty.Value + var err error + if os.AttrsFlat != nil { + // Legacy mode. We'll do our best to unpick this from the flatmap. + val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty) + if err != nil { + return nil, err + } + } else { + val, err = ctyjson.Unmarshal(os.AttrsJSON, ty) + if err != nil { + return nil, err + } + } + + return &ResourceInstanceObject{ + Value: val, + Status: os.Status, + Dependencies: os.Dependencies, + Private: os.Private, + }, nil +} + +// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the +// metadata from the receiver and writing in the given new schema version +// and attribute value that are presumed to have resulted from upgrading +// from an older schema version. +func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) { + new := os.DeepCopy() + new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap + + // This is the same principle as ResourceInstanceObject.Encode, but + // avoiding a decode/re-encode cycle because we don't have type info + // available for the "old" attributes. 
+ newAttrs = cty.UnknownAsNull(newAttrs)
+ src, err := ctyjson.Marshal(newAttrs, newType)
+ if err != nil {
+ return nil, err
+ }
+
+ new.AttrsJSON = src
+ new.SchemaVersion = newSchemaVersion
+ return new, nil
+} diff --git a/vendor/github.com/hashicorp/terraform/states/module.go b/vendor/github.com/hashicorp/terraform/states/module.go new file mode 100644 index 0000000000..d89e7878d8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/module.go @@ -0,0 +1,285 @@ +package states
+
+import (
+ "github.com/zclconf/go-cty/cty"
+
+ "github.com/hashicorp/terraform/addrs"
+)
+
+// Module is a container for the states of objects within a particular module.
+type Module struct {
+ Addr addrs.ModuleInstance
+
+ // Resources contains the state for each resource. The keys in this map are
+ // an implementation detail and must not be used by outside callers.
+ Resources map[string]*Resource
+
+ // OutputValues contains the state for each output value. The keys in this
+ // map are output value names.
+ OutputValues map[string]*OutputValue
+
+ // LocalValues contains the value for each named local value. The keys
+ // in this map are local value names.
+ LocalValues map[string]cty.Value
+}
+
+// NewModule constructs an empty module state for the given module address.
+func NewModule(addr addrs.ModuleInstance) *Module {
+ return &Module{
+ Addr: addr,
+ Resources: map[string]*Resource{},
+ OutputValues: map[string]*OutputValue{},
+ LocalValues: map[string]cty.Value{},
+ }
+}
+
+// Resource returns the state for the resource with the given address within
+// the receiving module state, or nil if the requested resource is not tracked
+// in the state.
+func (ms *Module) Resource(addr addrs.Resource) *Resource {
+ return ms.Resources[addr.String()]
+}
+
+// ResourceInstance returns the state for the resource instance with the given
+// address within the receiving module state, or nil if the requested instance
+// is not tracked in the state.
+func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance {
+ rs := ms.Resource(addr.Resource)
+ if rs == nil {
+ return nil
+ }
+ return rs.Instance(addr.Key)
+}
+
+// SetResourceMeta updates the resource-level metadata for the resource
+// with the given address, creating the resource state for it if it doesn't
+// already exist.
+func (ms *Module) SetResourceMeta(addr addrs.Resource, eachMode EachMode, provider addrs.AbsProviderConfig) {
+ rs := ms.Resource(addr)
+ if rs == nil {
+ rs = &Resource{
+ Addr: addr,
+ Instances: map[addrs.InstanceKey]*ResourceInstance{},
+ }
+ ms.Resources[addr.String()] = rs
+ }
+
+ rs.EachMode = eachMode
+ rs.ProviderConfig = provider
+}
+
+// RemoveResource removes the entire state for the given resource, taking with
+// it any instances associated with the resource. This should generally be
+// called only for resource objects whose instances have all been destroyed.
+func (ms *Module) RemoveResource(addr addrs.Resource) {
+ delete(ms.Resources, addr.String())
+}
+
+// SetResourceInstanceCurrent saves the given instance object as the current
+// generation of the resource instance with the given address, simultaneously
+// updating the recorded provider configuration address, dependencies, and
+// resource EachMode.
+//
+// Any existing current instance object for the given resource is overwritten.
+// Set obj to nil to remove the primary generation object altogether. If there
+// are no deposed objects then the instance will be removed altogether.
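+//
+// For example (a sketch; instAddr, objSrc, and providerAddr are assumed to
+// be in scope):
+//
+//     ms.SetResourceInstanceCurrent(instAddr, objSrc, providerAddr)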
+//
+// The provider address and "each mode" are resource-wide settings and so they
+// are updated for all other instances of the same resource as a side-effect of
+// this call.
+func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
+ ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider)
+
+ rs := ms.Resource(addr.Resource)
+ is := rs.EnsureInstance(addr.Key)
+
+ is.Current = obj
+
+ if !is.HasObjects() {
+ // If we have no objects at all then we'll clean up.
+ delete(rs.Instances, addr.Key)
+ }
+ if rs.EachMode == NoEach && len(rs.Instances) == 0 {
+ // Also clean up if we only expect to have one instance anyway
+ // and there are none. We leave the resource behind if an each mode
+ // is active because an empty list or map of instances is a valid state.
+ delete(ms.Resources, addr.Resource.String())
+ }
+}
+
+// SetResourceInstanceDeposed saves the given instance object as a deposed
+// generation of the resource instance with the given address and deposed key.
+//
+// Call this method only for pre-existing deposed objects that already have
+// a known DeposedKey. For example, this method is useful if reloading objects
+// that were persisted to a state file. To mark the current object as deposed,
+// use DeposeResourceInstanceObject instead.
+//
+// The resource that contains the given instance must already exist in the
+// state, or this method will panic. Use Resource to check first if its
+// presence is not already guaranteed.
+//
+// Any existing deposed object for the given resource instance and key is
+// overwritten. Set obj to nil to remove the deposed object altogether. If
+// the instance is left with no objects after this operation then it will
+// be removed from its containing resource altogether.
+func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
+ ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider)
+
+ rs := ms.Resource(addr.Resource)
+ is := rs.EnsureInstance(addr.Key)
+ if obj != nil {
+ is.Deposed[key] = obj
+ } else {
+ delete(is.Deposed, key)
+ }
+
+ if !is.HasObjects() {
+ // If we have no objects at all then we'll clean up.
+ delete(rs.Instances, addr.Key)
+ }
+ if rs.EachMode == NoEach && len(rs.Instances) == 0 {
+ // Also clean up if we only expect to have one instance anyway
+ // and there are none. We leave the resource behind if an each mode
+ // is active because an empty list or map of instances is a valid state.
+ delete(ms.Resources, addr.Resource.String())
+ }
+}
+
+// ForgetResourceInstanceAll removes the record of all objects associated with
+// the specified resource instance, if present. If not present, this is a no-op.
+func (ms *Module) ForgetResourceInstanceAll(addr addrs.ResourceInstance) {
+ rs := ms.Resource(addr.Resource)
+ if rs == nil {
+ return
+ }
+ delete(rs.Instances, addr.Key)
+
+ if rs.EachMode == NoEach && len(rs.Instances) == 0 {
+ // Also clean up if we only expect to have one instance anyway
+ // and there are none. We leave the resource behind if an each mode
+ // is active because an empty list or map of instances is a valid state.
+ delete(ms.Resources, addr.Resource.String())
+ }
+}
+
+// ForgetResourceInstanceDeposed removes the record of the deposed object with
+// the given address and key, if present. If not present, this is a no-op.
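+//
+// For example (a sketch; the key shown is illustrative only):
+//
+//     ms.ForgetResourceInstanceDeposed(instAddr, DeposedKey("00000001"))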
+func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) {
+ rs := ms.Resource(addr.Resource)
+ if rs == nil {
+ return
+ }
+ is := rs.Instance(addr.Key)
+ if is == nil {
+ return
+ }
+ delete(is.Deposed, key)
+
+ if !is.HasObjects() {
+ // If we have no objects at all then we'll clean up.
+ delete(rs.Instances, addr.Key)
+ }
+ if rs.EachMode == NoEach && len(rs.Instances) == 0 {
+ // Also clean up if we only expect to have one instance anyway
+ // and there are none. We leave the resource behind if an each mode
+ // is active because an empty list or map of instances is a valid state.
+ delete(ms.Resources, addr.Resource.String())
+ }
+}
+
+// deposeResourceInstanceObject is the real implementation of
+// SyncState.DeposeResourceInstanceObject.
+func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey {
+ is := ms.ResourceInstance(addr)
+ if is == nil {
+ return NotDeposed
+ }
+ return is.deposeCurrentObject(forceKey)
+}
+
+// maybeRestoreResourceInstanceDeposed is the real implementation of
+// SyncState.MaybeRestoreResourceInstanceDeposed.
+func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool {
+ rs := ms.Resource(addr.Resource)
+ if rs == nil {
+ return false
+ }
+ is := rs.Instance(addr.Key)
+ if is == nil {
+ return false
+ }
+ if is.Current != nil {
+ return false
+ }
+ obj := is.Deposed[key]
+ if obj == nil {
+ // Nothing is tracked under the given deposed key, so there is
+ // nothing to restore.
+ return false
+ }
+ is.Current = obj
+ delete(is.Deposed, key)
+ return true
+}
+
+// SetOutputValue writes an output value into the state, overwriting any
+// existing value of the same name.
+func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue {
+ os := &OutputValue{
+ Value: value,
+ Sensitive: sensitive,
+ }
+ ms.OutputValues[name] = os
+ return os
+}
+
+// RemoveOutputValue removes the output value of the given name from the state,
+// if it exists. This method is a no-op if there is no value of the given
+// name.
+func (ms *Module) RemoveOutputValue(name string) {
+ delete(ms.OutputValues, name)
+}
+
+// SetLocalValue writes a local value into the state, overwriting any
+// existing value of the same name.
+func (ms *Module) SetLocalValue(name string, value cty.Value) {
+ ms.LocalValues[name] = value
+}
+
+// RemoveLocalValue removes the local value of the given name from the state,
+// if it exists. This method is a no-op if there is no value of the given
+// name.
+func (ms *Module) RemoveLocalValue(name string) {
+ delete(ms.LocalValues, name)
+}
+
+// PruneResourceHusks is a specialized method that will remove any Resource
+// objects that do not contain any instances, even if they have an EachMode.
+//
+// You probably shouldn't call this! See the method of the same name on
+// type State for more information on what this is for and the rare situations
+// where it is safe to use.
+func (ms *Module) PruneResourceHusks() {
+ for _, rs := range ms.Resources {
+ if len(rs.Instances) == 0 {
+ ms.RemoveResource(rs.Addr)
+ }
+ }
+}
+
+// empty returns true if the receiving module state is contributing nothing
+// to the state. In other words, it returns true if the module could be
+// removed from the state altogether without changing the meaning of the state.
+//
+// In practice a module containing no objects is the same as a non-existent
+// module, and so we can opportunistically clean up once a module becomes
+// empty on the assumption that it will be re-added if needed later.
+func (ms *Module) empty() bool { + if ms == nil { + return true + } + + // This must be updated to cover any new collections added to Module + // in future. + return (len(ms.Resources) == 0 && + len(ms.OutputValues) == 0 && + len(ms.LocalValues) == 0) +} diff --git a/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go b/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go new file mode 100644 index 0000000000..cd6d12849a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT. + +package states + +import "strconv" + +const ( + _ObjectStatus_name_0 = "ObjectPlanned" + _ObjectStatus_name_1 = "ObjectReady" + _ObjectStatus_name_2 = "ObjectTainted" +) + +func (i ObjectStatus) String() string { + switch { + case i == 80: + return _ObjectStatus_name_0 + case i == 82: + return _ObjectStatus_name_1 + case i == 84: + return _ObjectStatus_name_2 + default: + return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/states/output_value.go b/vendor/github.com/hashicorp/terraform/states/output_value.go new file mode 100644 index 0000000000..d232b76d40 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/output_value.go @@ -0,0 +1,14 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" +) + +// OutputValue represents the state of a particular output value. +// +// It is not valid to mutate an OutputValue object once it has been created. +// Instead, create an entirely new OutputValue to replace the previous one. +type OutputValue struct { + Value cty.Value + Sensitive bool +} diff --git a/vendor/github.com/hashicorp/terraform/states/resource.go b/vendor/github.com/hashicorp/terraform/states/resource.go new file mode 100644 index 0000000000..e2a2b8588b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/resource.go @@ -0,0 +1,239 @@ +package states + +import ( + "fmt" + "math/rand" + "time" + + "github.com/hashicorp/terraform/addrs" +) + +// Resource represents the state of a resource. +type Resource struct { + // Addr is the module-relative address for the resource this state object + // belongs to. + Addr addrs.Resource + + // EachMode is the multi-instance mode currently in use for this resource, + // or NoEach if this is a single-instance resource. This dictates what + // type of value is returned when accessing this resource via expressions + // in the Terraform language. + EachMode EachMode + + // Instances contains the potentially-multiple instances associated with + // this resource. This map can contain a mixture of different key types, + // but only the ones of InstanceKeyType are considered current. + Instances map[addrs.InstanceKey]*ResourceInstance + + // ProviderConfig is the absolute address for the provider configuration that + // most recently managed this resource. This is used to connect a resource + // with a provider configuration when the resource configuration block is + // not available, such as if it has been removed from configuration + // altogether. + ProviderConfig addrs.AbsProviderConfig +} + +// Instance returns the state for the instance with the given key, or nil +// if no such instance is tracked within the state. 
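+//
+// For example, a resource that uses "count" tracks its instances under
+// integer keys (a sketch):
+//
+//     is := rs.Instance(addrs.IntKey(0))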
+func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance {
+ return rs.Instances[key]
+}
+
+// EnsureInstance returns the state for the instance with the given key,
+// creating a new empty state for it if one doesn't already exist.
+//
+// Because this may create and save a new state, it is considered to be
+// a write operation.
+func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance {
+ ret := rs.Instance(key)
+ if ret == nil {
+ ret = NewResourceInstance()
+ rs.Instances[key] = ret
+ }
+ return ret
+}
+
+// ResourceInstance represents the state of a particular instance of a resource.
+type ResourceInstance struct {
+ // Current, if non-nil, is the remote object that is currently represented
+ // by the corresponding resource instance.
+ Current *ResourceInstanceObjectSrc
+
+ // Deposed, if len > 0, contains any remote objects that were previously
+ // represented by the corresponding resource instance but have been
+ // replaced and are pending destruction due to the create_before_destroy
+ // lifecycle mode.
+ Deposed map[DeposedKey]*ResourceInstanceObjectSrc
+}
+
+// NewResourceInstance constructs and returns a new ResourceInstance, ready to
+// use.
+func NewResourceInstance() *ResourceInstance {
+ return &ResourceInstance{
+ Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{},
+ }
+}
+
+// HasCurrent returns true if this resource instance has a "current"-generation
+// object. Most instances do, but this can briefly be false during a
+// create-before-destroy replace operation when the current has been deposed
+// but its replacement has not yet been created.
+func (i *ResourceInstance) HasCurrent() bool {
+ return i != nil && i.Current != nil
+}
+
+// HasDeposed returns true if this resource instance has a deposed object
+// with the given key.
+func (i *ResourceInstance) HasDeposed(key DeposedKey) bool {
+ return i != nil && i.Deposed[key] != nil
+}
+
+// HasAnyDeposed returns true if this resource instance has one or more
+// deposed objects.
+func (i *ResourceInstance) HasAnyDeposed() bool {
+ return i != nil && len(i.Deposed) > 0
+}
+
+// HasObjects returns true if this resource instance has any objects at all,
+// whether current or deposed.
+func (i *ResourceInstance) HasObjects() bool {
+ return i.Current != nil || len(i.Deposed) != 0
+}
+
+// deposeCurrentObject is part of the real implementation of
+// SyncState.DeposeResourceInstanceObject. The exported method uses a lock
+// to ensure that we can safely allocate an unused deposed key without
+// collision.
+func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey {
+ if !i.HasCurrent() {
+ return NotDeposed
+ }
+
+ key := forceKey
+ if key == NotDeposed {
+ key = i.findUnusedDeposedKey()
+ } else {
+ if _, exists := i.Deposed[key]; exists {
+ panic(fmt.Sprintf("forced key %s is already in use", forceKey))
+ }
+ }
+ i.Deposed[key] = i.Current
+ i.Current = nil
+ return key
+}
+
+// GetGeneration retrieves the object of the given generation from the
+// ResourceInstance, or returns nil if there is no such object.
+//
+// If the given generation is nil or invalid, this method will panic.
+func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc {
+ if gen == CurrentGen {
+ return i.Current
+ }
+ if dk, ok := gen.(DeposedKey); ok {
+ return i.Deposed[dk]
+ }
+ if gen == nil {
+ panic("get with nil Generation")
+ }
+ // Should never fall out here, since the above covers all possible
+ // Generation values.
+ panic(fmt.Sprintf("get invalid Generation %#v", gen)) +} + +// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance at the time of the call. +// +// Note that the validity of this result may change if new deposed keys are +// allocated before it is used. To avoid this risk, instead use the +// DeposeResourceInstanceObject method on the SyncState wrapper type, which +// allocates a key and uses it atomically. +func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey { + return i.findUnusedDeposedKey() +} + +// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance. +func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey { + for { + key := NewDeposedKey() + if _, exists := i.Deposed[key]; !exists { + return key + } + // Spin until we find a unique one. This shouldn't take long, because + // we have a 32-bit keyspace and there's rarely more than one deposed + // instance. + } +} + +// EachMode specifies the multi-instance mode for a resource. +type EachMode rune + +const ( + NoEach EachMode = 0 + EachList EachMode = 'L' + EachMap EachMode = 'M' +) + +//go:generate stringer -type EachMode + +func eachModeForInstanceKey(key addrs.InstanceKey) EachMode { + switch key.(type) { + case addrs.IntKey: + return EachList + case addrs.StringKey: + return EachMap + default: + if key == addrs.NoKey { + return NoEach + } + panic(fmt.Sprintf("don't know an each mode for instance key %#v", key)) + } +} + +// DeposedKey is a 8-character hex string used to uniquely identify deposed +// instance objects in the state. +type DeposedKey string + +// NotDeposed is a special invalid value of DeposedKey that is used to represent +// the absense of a deposed key. It must not be used as an actual deposed key. +const NotDeposed = DeposedKey("") + +var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano())) + +// NewDeposedKey generates a pseudo-random deposed key. Because of the short +// length of these keys, uniqueness is not a natural consequence and so the +// caller should test to see if the generated key is already in use and generate +// another if so, until a unique key is found. +func NewDeposedKey() DeposedKey { + v := deposedKeyRand.Uint32() + return DeposedKey(fmt.Sprintf("%08x", v)) +} + +func (k DeposedKey) String() string { + return string(k) +} + +func (k DeposedKey) GoString() string { + ks := string(k) + switch { + case ks == "": + return "states.NotDeposed" + default: + return fmt.Sprintf("states.DeposedKey(%s)", ks) + } +} + +// Generation is a helper method to convert a DeposedKey into a Generation. +// If the reciever is anything other than NotDeposed then the result is +// just the same value as a Generation. If the receiver is NotDeposed then +// the result is CurrentGen. +func (k DeposedKey) Generation() Generation { + if k == NotDeposed { + return CurrentGen + } + return k +} + +// generation is an implementation of Generation. +func (k DeposedKey) generation() {} diff --git a/vendor/github.com/hashicorp/terraform/states/state.go b/vendor/github.com/hashicorp/terraform/states/state.go new file mode 100644 index 0000000000..1f842359e9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/state.go @@ -0,0 +1,229 @@ +package states + +import ( + "sort" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" +) + +// State is the top-level type of a Terraform state. 
+//
+// A state should be mutated only via its accessor methods, to ensure that
+// invariants are preserved.
+//
+// Access to State and the nested values within it is not concurrency-safe,
+// so when accessing a State object concurrently it is the caller's
+// responsibility to ensure that only one write is in progress at a time
+// and that reads only occur when no write is in progress. The most common
+// way to achieve this is to wrap the State in a SyncState and use the
+// higher-level atomic operations supported by that type.
+type State struct {
+ // Modules contains the state for each module. The keys in this map are
+ // an implementation detail and must not be used by outside callers.
+ Modules map[string]*Module
+}
+
+// NewState constructs a minimal empty state, containing an empty root module.
+func NewState() *State {
+ modules := map[string]*Module{}
+ modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance)
+ return &State{
+ Modules: modules,
+ }
+}
+
+// BuildState is a helper -- primarily intended for tests -- to build a state
+// using imperative code against the SyncState type while still acting as
+// an expression of type *State to assign into a containing struct.
+func BuildState(cb func(*SyncState)) *State {
+ s := NewState()
+ cb(s.SyncWrapper())
+ return s
+}
+
+// Empty returns true if there are no resources or populated output values
+// in the receiver. In other words, if this state could be safely replaced
+// with the return value of NewState and be functionally equivalent.
+func (s *State) Empty() bool {
+ if s == nil {
+ return true
+ }
+ for _, ms := range s.Modules {
+ if len(ms.Resources) != 0 {
+ return false
+ }
+ if len(ms.OutputValues) != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Module returns the state for the module with the given address, or nil if
+// the requested module is not tracked in the state.
+func (s *State) Module(addr addrs.ModuleInstance) *Module {
+ if s == nil {
+ panic("State.Module on nil *State")
+ }
+ return s.Modules[addr.String()]
+}
+
+// RemoveModule removes the module with the given address from the state,
+// unless it is the root module. The root module cannot be deleted, and so
+// this method will panic if that is attempted.
+//
+// Removing a module implicitly discards all of the resources, outputs and
+// local values within it, and so this should usually be done only for empty
+// modules. For callers accessing the state through a SyncState wrapper, modules
+// are automatically pruned if they are empty after one of their contained
+// elements is removed.
+func (s *State) RemoveModule(addr addrs.ModuleInstance) {
+ if addr.IsRoot() {
+ panic("attempted to remove root module")
+ }
+
+ delete(s.Modules, addr.String())
+}
+
+// RootModule is a convenient alias for Module(addrs.RootModuleInstance).
+func (s *State) RootModule() *Module {
+ if s == nil {
+ panic("RootModule called on nil State")
+ }
+ return s.Modules[addrs.RootModuleInstance.String()]
+}
+
+// EnsureModule returns the state for the module with the given address,
+// creating and adding a new one if necessary.
+//
+// Since this might modify the state to add a new instance, it is considered
+// to be a write operation.
+func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module {
+ ms := s.Module(addr)
+ if ms == nil {
+ ms = NewModule(addr)
+ s.Modules[addr.String()] = ms
+ }
+ return ms
+}
+
+// HasResources returns true if there is at least one resource (of any mode)
+// present in the receiving state.
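+//
+// For example (a sketch):
+//
+//     if !s.HasResources() {
+//         // nothing is being managed; the state is effectively empty
+//     }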
+func (s *State) HasResources() bool {
+ if s == nil {
+ return false
+ }
+ for _, ms := range s.Modules {
+ if len(ms.Resources) > 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// Resource returns the state for the resource with the given address, or nil
+// if no such resource is tracked in the state.
+func (s *State) Resource(addr addrs.AbsResource) *Resource {
+ ms := s.Module(addr.Module)
+ if ms == nil {
+ return nil
+ }
+ return ms.Resource(addr.Resource)
+}
+
+// ResourceInstance returns the state for the resource instance with the given
+// address, or nil if no such resource instance is tracked in the state.
+func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance {
+ if s == nil {
+ panic("State.ResourceInstance on nil *State")
+ }
+ ms := s.Module(addr.Module)
+ if ms == nil {
+ return nil
+ }
+ return ms.ResourceInstance(addr.Resource)
+}
+
+// OutputValue returns the state for the output value with the given address,
+// or nil if no such output value is tracked in the state.
+func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue {
+ ms := s.Module(addr.Module)
+ if ms == nil {
+ return nil
+ }
+ return ms.OutputValues[addr.OutputValue.Name]
+}
+
+// LocalValue returns the value of the local value with the given address,
+// or cty.NilVal if no such value is tracked in the state.
+func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value {
+ ms := s.Module(addr.Module)
+ if ms == nil {
+ return cty.NilVal
+ }
+ return ms.LocalValues[addr.LocalValue.Name]
+}
+
+// ProviderAddrs returns a list of all of the provider configuration addresses
+// referenced throughout the receiving state.
+//
+// The result is de-duplicated so that each distinct address appears only once.
+func (s *State) ProviderAddrs() []addrs.AbsProviderConfig {
+ if s == nil {
+ return nil
+ }
+
+ m := map[string]addrs.AbsProviderConfig{}
+ for _, ms := range s.Modules {
+ for _, rc := range ms.Resources {
+ m[rc.ProviderConfig.String()] = rc.ProviderConfig
+ }
+ }
+ if len(m) == 0 {
+ return nil
+ }
+
+ // This is mainly just so we'll get stable results for testing purposes.
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ ret := make([]addrs.AbsProviderConfig, len(keys))
+ for i, key := range keys {
+ ret[i] = m[key]
+ }
+
+ return ret
+}
+
+// PruneResourceHusks is a specialized method that will remove any Resource
+// objects that do not contain any instances, even if they have an EachMode.
+//
+// This should generally be used only after a "terraform destroy" operation,
+// to finalize the cleanup of the state. It is not correct to use this after
+// other operations because if a resource has "count = 0" or "for_each" over
+// an empty collection then we want to retain it in the state so that references
+// to it, particularly in "strange" contexts like "terraform console", can be
+// properly resolved.
+//
+// This method MUST NOT be called concurrently with other readers and writers
+// of the receiving state.
+func (s *State) PruneResourceHusks() {
+ for _, m := range s.Modules {
+ m.PruneResourceHusks()
+ if len(m.Resources) == 0 && !m.Addr.IsRoot() {
+ s.RemoveModule(m.Addr)
+ }
+ }
+}
+
+// SyncWrapper returns a SyncState object wrapping the receiver.
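+//
+// For example (a sketch):
+//
+//     ss := s.SyncWrapper()
+//     // ss can then safely mediate access from multiple goroutines.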
+func (s *State) SyncWrapper() *SyncState {
+ return &SyncState{
+ state: s,
+ }
+} diff --git a/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go new file mode 100644 index 0000000000..ea717d00ec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go @@ -0,0 +1,218 @@ +package states
+
+import (
+ "github.com/hashicorp/terraform/addrs"
+ "github.com/zclconf/go-cty/cty"
+)
+
+// Taking deep copies of states is an important operation because state is
+// otherwise a mutable data structure that is challenging to share across
+// many separate callers. It is important that the DeepCopy implementations
+// in this file comprehensively copy all parts of the state data structure
+// that could be mutated via pointers.
+
+// DeepCopy returns a new state that contains equivalent data to the receiver
+// but shares no backing memory in common.
+//
+// As with all methods on State, this method is not safe to use concurrently
+// with writing to any portion of the receiving data structure. It is the
+// caller's responsibility to ensure mutual exclusion for the duration of the
+// operation, but may then freely modify the receiver and the returned copy
+// independently once this method returns.
+func (s *State) DeepCopy() *State {
+ if s == nil {
+ return nil
+ }
+
+ modules := make(map[string]*Module, len(s.Modules))
+ for k, m := range s.Modules {
+ modules[k] = m.DeepCopy()
+ }
+ return &State{
+ Modules: modules,
+ }
+}
+
+// DeepCopy returns a new module state that contains equivalent data to the
+// receiver but shares no backing memory in common.
+//
+// As with all methods on Module, this method is not safe to use concurrently
+// with writing to any portion of the receiving data structure. It is the
+// caller's responsibility to ensure mutual exclusion for the duration of the
+// operation, but may then freely modify the receiver and the returned copy
+// independently once this method returns.
+func (ms *Module) DeepCopy() *Module {
+ if ms == nil {
+ return nil
+ }
+
+ resources := make(map[string]*Resource, len(ms.Resources))
+ for k, r := range ms.Resources {
+ resources[k] = r.DeepCopy()
+ }
+ outputValues := make(map[string]*OutputValue, len(ms.OutputValues))
+ for k, v := range ms.OutputValues {
+ outputValues[k] = v.DeepCopy()
+ }
+ localValues := make(map[string]cty.Value, len(ms.LocalValues))
+ for k, v := range ms.LocalValues {
+ // cty.Value is immutable, so we don't need to copy these.
+ localValues[k] = v
+ }
+
+ return &Module{
+ Addr: ms.Addr, // technically mutable, but immutable by convention
+ Resources: resources,
+ OutputValues: outputValues,
+ LocalValues: localValues,
+ }
+}
+
+// DeepCopy returns a new resource state that contains equivalent data to the
+// receiver but shares no backing memory in common.
+//
+// As with all methods on Resource, this method is not safe to use concurrently
+// with writing to any portion of the receiving data structure. It is the
+// caller's responsibility to ensure mutual exclusion for the duration of the
+// operation, but may then freely modify the receiver and the returned copy
+// independently once this method returns.
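+//
+// For example (a sketch):
+//
+//     cp := rs.DeepCopy()
+//     cp.EachMode = EachList // rs is unaffected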
+func (rs *Resource) DeepCopy() *Resource {
+ if rs == nil {
+ return nil
+ }
+
+ instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances))
+ for k, i := range rs.Instances {
+ instances[k] = i.DeepCopy()
+ }
+
+ return &Resource{
+ Addr: rs.Addr,
+ EachMode: rs.EachMode,
+ Instances: instances,
+ ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention
+ }
+}
+
+// DeepCopy returns a new resource instance state that contains equivalent data
+// to the receiver but shares no backing memory in common.
+//
+// As with all methods on ResourceInstance, this method is not safe to use
+// concurrently with writing to any portion of the receiving data structure. It
+// is the caller's responsibility to ensure mutual exclusion for the duration
+// of the operation, but may then freely modify the receiver and the returned
+// copy independently once this method returns.
+func (is *ResourceInstance) DeepCopy() *ResourceInstance {
+ if is == nil {
+ return nil
+ }
+
+ deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed))
+ for k, obj := range is.Deposed {
+ deposed[k] = obj.DeepCopy()
+ }
+
+ return &ResourceInstance{
+ Current: is.Current.DeepCopy(),
+ Deposed: deposed,
+ }
+}
+
+// DeepCopy returns a new resource instance object that contains equivalent data
+// to the receiver but shares no backing memory in common.
+//
+// As with all methods on ResourceInstanceObjectSrc, this method is not safe to
+// use concurrently with writing to any portion of the receiving data structure.
+// It is the caller's responsibility to ensure mutual exclusion for the duration
+// of the operation, but may then freely modify the receiver and the returned
+// copy independently once this method returns.
+func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
+ if obj == nil {
+ return nil
+ }
+
+ var attrsFlat map[string]string
+ if obj.AttrsFlat != nil {
+ attrsFlat = make(map[string]string, len(obj.AttrsFlat))
+ for k, v := range obj.AttrsFlat {
+ attrsFlat[k] = v
+ }
+ }
+
+ var attrsJSON []byte
+ if obj.AttrsJSON != nil {
+ attrsJSON = make([]byte, len(obj.AttrsJSON))
+ copy(attrsJSON, obj.AttrsJSON)
+ }
+
+ var private []byte
+ if obj.Private != nil {
+ // Assign to the outer variable here; using := would shadow it and
+ // leave the copied buffer unused.
+ private = make([]byte, len(obj.Private))
+ copy(private, obj.Private)
+ }
+
+ // Some addrs.Referenceable implementations are technically mutable, but
+ // we treat them as immutable by convention and so we don't deep-copy here.
+ dependencies := make([]addrs.Referenceable, len(obj.Dependencies))
+ copy(dependencies, obj.Dependencies)
+
+ return &ResourceInstanceObjectSrc{
+ Status: obj.Status,
+ SchemaVersion: obj.SchemaVersion,
+ Private: private,
+ AttrsFlat: attrsFlat,
+ AttrsJSON: attrsJSON,
+ Dependencies: dependencies,
+ }
+}
+
+// DeepCopy returns a new resource instance object that contains equivalent data
+// to the receiver but shares no backing memory in common.
+//
+// As with all methods on ResourceInstanceObject, this method is not safe to use
+// concurrently with writing to any portion of the receiving data structure. It
+// is the caller's responsibility to ensure mutual exclusion for the duration
+// of the operation, but may then freely modify the receiver and the returned
+// copy independently once this method returns.
+func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject {
+ if obj == nil {
+ return nil
+ }
+
+ var private []byte
+ if obj.Private != nil {
+ // Assign to the outer variable here; using := would shadow it and
+ // leave the copied buffer unused.
+ private = make([]byte, len(obj.Private))
+ copy(private, obj.Private)
+ }
+
+ // Some addrs.Referenceable implementations are technically mutable, but
+ // we treat them as immutable by convention and so we don't deep-copy here.
+ dependencies := make([]addrs.Referenceable, len(obj.Dependencies))
+ copy(dependencies, obj.Dependencies)
+
+ return &ResourceInstanceObject{
+ Value: obj.Value,
+ Status: obj.Status,
+ Private: private,
+ Dependencies: dependencies,
+ }
+}
+
+// DeepCopy returns a new output value state that contains equivalent data
+// to the receiver but shares no backing memory in common.
+//
+// As with all methods on OutputValue, this method is not safe to use
+// concurrently with writing to any portion of the receiving data structure. It
+// is the caller's responsibility to ensure mutual exclusion for the duration
+// of the operation, but may then freely modify the receiver and the returned
+// copy independently once this method returns.
+func (os *OutputValue) DeepCopy() *OutputValue {
+ if os == nil {
+ return nil
+ }
+
+ return &OutputValue{
+ Value: os.Value,
+ Sensitive: os.Sensitive,
+ }
+} diff --git a/vendor/github.com/hashicorp/terraform/states/state_equal.go b/vendor/github.com/hashicorp/terraform/states/state_equal.go new file mode 100644 index 0000000000..ea20967e5b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/state_equal.go @@ -0,0 +1,18 @@ +package states
+
+import (
+ "reflect"
+)
+
+// Equal returns true if the receiver is functionally equivalent to other,
+// including any ephemeral portions of the state that would not be included
+// if the state were saved to files.
+//
+// To test only the persistent portions of two states for equality, instead
+// use statefile.StatesMarshalEqual.
+func (s *State) Equal(other *State) bool {
+ // For the moment this is sufficient, but we may need to do something
+ // more elaborate in future if we have any portions of state that require
+ // more sophisticated comparisons.
+ return reflect.DeepEqual(s, other)
+} diff --git a/vendor/github.com/hashicorp/terraform/states/state_string.go b/vendor/github.com/hashicorp/terraform/states/state_string.go new file mode 100644 index 0000000000..bca4581c95 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/state_string.go @@ -0,0 +1,279 @@ +package states
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+
+ ctyjson "github.com/zclconf/go-cty/cty/json"
+
+ "github.com/hashicorp/terraform/addrs"
+ "github.com/hashicorp/terraform/config/hcl2shim"
+)
+
+// String returns a rather-odd string representation of the entire state.
+//
+// This is intended to match the behavior of the older terraform.State.String
+// method that is used in lots of existing tests. It should not be used in
+// new tests: instead, use "cmp" to directly compare the state data structures
+// and print out a diff if they do not match.
+//
+// This method should never be used in non-test code, whether directly by call
+// or indirectly via a %s or %q verb in package fmt.
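+//
+// For example, a new test might instead do (a sketch, assuming the
+// github.com/google/go-cmp/cmp package):
+//
+//     if diff := cmp.Diff(want, got); diff != "" {
+//         t.Errorf("wrong state\n%s", diff)
+//     }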
+func (s *State) String() string {
+ if s == nil {
+ return ""
+ }
+
+ // sort the modules by name for consistent output
+ modules := make([]string, 0, len(s.Modules))
+ for m := range s.Modules {
+ modules = append(modules, m)
+ }
+ sort.Strings(modules)
+
+ var buf bytes.Buffer
+ for _, name := range modules {
+ m := s.Modules[name]
+ mStr := m.testString()
+
+ // If we're the root module, we just write the output directly.
+ if m.Addr.IsRoot() {
+ buf.WriteString(mStr + "\n")
+ continue
+ }
+
+ // We need to build out a string that resembles the not-quite-standard
+ // format that terraform.State.String used to use, where there's a
+ // "module." prefix but then just a chain of all of the module names
+ // without any further "module." portions.
+ buf.WriteString("module")
+ for _, step := range m.Addr {
+ buf.WriteByte('.')
+ buf.WriteString(step.Name)
+ if step.InstanceKey != addrs.NoKey {
+ buf.WriteByte('[')
+ buf.WriteString(step.InstanceKey.String())
+ buf.WriteByte(']')
+ }
+ }
+ buf.WriteString(":\n")
+
+ s := bufio.NewScanner(strings.NewReader(mStr))
+ for s.Scan() {
+ text := s.Text()
+ if text != "" {
+ text = " " + text
+ }
+
+ buf.WriteString(fmt.Sprintf("%s\n", text))
+ }
+ }
+
+ return strings.TrimSpace(buf.String())
+}
+
+// testString is used to produce part of the output of State.String. It should
+// never be used directly.
+func (m *Module) testString() string {
+ var buf bytes.Buffer
+
+ if len(m.Resources) == 0 {
+ buf.WriteString("<no state>")
+ }
+
+ // We use AbsResourceInstance here, even though everything belongs to
+ // the same module, just because we have a sorting behavior defined
+ // for those but not for just ResourceInstance.
+ addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources))
+ for _, rs := range m.Resources {
+ for ik := range rs.Instances {
+ addrsOrder = append(addrsOrder, rs.Addr.Instance(ik).Absolute(addrs.RootModuleInstance))
+ }
+ }
+
+ sort.Slice(addrsOrder, func(i, j int) bool {
+ return addrsOrder[i].Less(addrsOrder[j])
+ })
+
+ for _, fakeAbsAddr := range addrsOrder {
+ addr := fakeAbsAddr.Resource
+ rs := m.Resource(addr.ContainingResource())
+ is := m.ResourceInstance(addr)
+
+ // Here we need to fake up a legacy-style address as the old state
+ // types would've used, since that's what our tests against those
+ // old types expect. The significant difference is that the instance
+ // key is dot-separated rather than using index brackets.
+ k := addr.ContainingResource().String()
+ if addr.Key != addrs.NoKey {
+ switch tk := addr.Key.(type) {
+ case addrs.IntKey:
+ k = fmt.Sprintf("%s.%d", k, tk)
+ default:
+ // No other key types existed for the legacy types, so we
+ // can do whatever we want here. We'll just use our standard
+ // syntax for these.
+ k = k + tk.String()
+ }
+ }
+
+ id := LegacyInstanceObjectID(is.Current)
+
+ taintStr := ""
+ if is.Current != nil && is.Current.Status == ObjectTainted {
+ taintStr = " (tainted)"
+ }
+
+ deposedStr := ""
+ if len(is.Deposed) > 0 {
+ deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed))
+ }
+
+ buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
+ buf.WriteString(fmt.Sprintf(" ID = %s\n", id))
+ buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.ProviderConfig.String()))
+
+ // Attributes were a flatmap before, but are not anymore. To preserve
+ // our old output as closely as possible we need to do a conversion
+ // to flatmap. Normally we'd want to do this with schema for
+ // accuracy, but for our purposes here it only needs to be approximate.
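+ // (For example, a list attribute "subnets" flattens to keys such as
+ // "subnets.#" for its length and "subnets.0", "subnets.1", ... for its
+ // elements -- illustrative only.)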
+ // This should produce an identical result for most cases, though
+ // it will differ in a few cases:
+ // - The keys used for elements in a set will be different
+ // - Values for attributes of type cty.DynamicPseudoType will be
+ // misinterpreted (but these weren't possible in the old world anyway)
+ var attributes map[string]string
+ if obj := is.Current; obj != nil {
+ switch {
+ case obj.AttrsFlat != nil:
+ // Easy (but increasingly unlikely) case: the state hasn't
+ // actually been upgraded to the new form yet.
+ attributes = obj.AttrsFlat
+ case obj.AttrsJSON != nil:
+ ty, err := ctyjson.ImpliedType(obj.AttrsJSON)
+ if err == nil {
+ val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty)
+ if err == nil {
+ attributes = hcl2shim.FlatmapValueFromHCL2(val)
+ }
+ }
+ }
+ }
+ attrKeys := make([]string, 0, len(attributes))
+ for ak, val := range attributes {
+ if ak == "id" {
+ continue
+ }
+
+ // don't show empty containers in the output
+ if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) {
+ continue
+ }
+
+ attrKeys = append(attrKeys, ak)
+ }
+
+ sort.Strings(attrKeys)
+
+ for _, ak := range attrKeys {
+ av := attributes[ak]
+ buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av))
+ }
+
+ // CAUTION: Since deposed keys are now random strings instead of
+ // incrementing integers, this result will not be deterministic
+ // if there is more than one deposed object.
+ i := 1
+ for _, t := range is.Deposed {
+ id := LegacyInstanceObjectID(t)
+ taintStr := ""
+ if t.Status == ObjectTainted {
+ taintStr = " (tainted)"
+ }
+ buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr))
+ i++
+ }
+
+ if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 {
+ buf.WriteString("\n Dependencies:\n")
+ for _, dep := range obj.Dependencies {
+ buf.WriteString(fmt.Sprintf(" %s\n", dep.String()))
+ }
+ }
+ }
+
+ if len(m.OutputValues) > 0 {
+ buf.WriteString("\nOutputs:\n\n")
+
+ ks := make([]string, 0, len(m.OutputValues))
+ for k := range m.OutputValues {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ v := m.OutputValues[k]
+ lv := hcl2shim.ConfigValueFromHCL2(v.Value)
+ switch vTyped := lv.(type) {
+ case string:
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+ case []interface{}:
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+ case map[string]interface{}:
+ var mapKeys []string
+ for key := range vTyped {
+ mapKeys = append(mapKeys, key)
+ }
+ sort.Strings(mapKeys)
+
+ var mapBuf bytes.Buffer
+ mapBuf.WriteString("{")
+ for _, key := range mapKeys {
+ mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
+ }
+ mapBuf.WriteString("}")
+
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
+ default:
+ buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv))
+ }
+ }
+ }
+
+ return buf.String()
+}
+
+// LegacyInstanceObjectID is a helper for extracting an object id value from
+// an instance object in a way that approximates how we used to do this
+// for the old state types. ID is no longer first-class, so this is preserved
+// only for compatibility with old tests that include the id as part of their
+// expected value.
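+//
+// For example (a sketch):
+//
+//     id := LegacyInstanceObjectID(is.Current) // "" when no id attribute exists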
+func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string {
+ if obj == nil {
+ return ""
+ }
+
+ if obj.AttrsJSON != nil {
+ type WithID struct {
+ ID string `json:"id"`
+ }
+ var withID WithID
+ err := json.Unmarshal(obj.AttrsJSON, &withID)
+ if err == nil {
+ return withID.ID
+ }
+ } else if obj.AttrsFlat != nil {
+ if flatID, exists := obj.AttrsFlat["id"]; exists {
+ return flatID
+ }
+ }
+
+ // For resource types created after id ceased to be a special attribute
+ // there may not actually be one at all. This is okay because older tests
+ // won't encounter this, and new tests shouldn't be using ids.
+ return ""
+} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go b/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go new file mode 100644 index 0000000000..a6d88ecd58 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go @@ -0,0 +1,62 @@ +package statefile
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/terraform/tfdiags"
+)
+
+const invalidFormat = "Invalid state file format"
+
+// jsonUnmarshalDiags is a helper that translates errors returned from
+// json.Unmarshal into hopefully-more-helpful diagnostics messages.
+func jsonUnmarshalDiags(err error) tfdiags.Diagnostics {
+ var diags tfdiags.Diagnostics
+ if err == nil {
+ return diags
+ }
+
+ switch tErr := err.(type) {
+ case *json.SyntaxError:
+ // We've usually already successfully parsed a source file as JSON at
+ // least once before we'd use jsonUnmarshalDiags with it (to sniff
+ // the version number) so this particular error should not appear much
+ // in practice.
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ invalidFormat,
+ fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset),
+ ))
+ case *json.UnmarshalTypeError:
+ // This is likely to be the most common case, describing a
+ // non-conformance between the file and the expected file format
+ // at a semantic level.
+ if tErr.Field != "" {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ invalidFormat,
+ fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value),
+ ))
+ } else {
+ // Without a field name, we can't really say anything helpful.
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ invalidFormat,
+ "The state file does not conform to the expected JSON data structure.",
+ ))
+ }
+ default:
+ // Fallback for all other types of errors. This can happen only for
+ // custom UnmarshalJSON implementations, so should be encountered
+ // only rarely.
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ invalidFormat,
+ fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()),
+ ))
+ }
+
+ return diags
+} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/doc.go b/vendor/github.com/hashicorp/terraform/states/statefile/doc.go new file mode 100644 index 0000000000..625d0cf429 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/doc.go @@ -0,0 +1,3 @@ +// Package statefile deals with the file format used to serialize states for
+// persistent storage and then deserialize them into memory again later.
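+//
+// The primary entry points are Read, which parses a state file (upgrading
+// legacy versions where possible), and Write, which serializes a File in the
+// current format.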
+package statefile diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/file.go b/vendor/github.com/hashicorp/terraform/states/statefile/file.go new file mode 100644 index 0000000000..6e20240199 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/file.go @@ -0,0 +1,62 @@ +package statefile
+
+import (
+ version "github.com/hashicorp/go-version"
+
+ "github.com/hashicorp/terraform/states"
+ tfversion "github.com/hashicorp/terraform/version"
+)
+
+// File is the in-memory representation of a state file. It includes the state
+// itself along with various metadata used to track changing state files for
+// the same configuration over time.
+type File struct {
+ // TerraformVersion is the version of Terraform that wrote this state file.
+ TerraformVersion *version.Version
+
+ // Serial is incremented on any operation that modifies
+ // the State file. It is used to detect potentially conflicting
+ // updates.
+ Serial uint64
+
+ // Lineage is set when a new, blank state file is created and then
+ // never updated. This allows us to determine whether the serials
+ // of two states can be meaningfully compared.
+ // Apart from the guarantee that collisions between two lineages
+ // are very unlikely, this value is opaque and external callers
+ // should only compare lineage strings byte-for-byte for equality.
+ Lineage string
+
+ // State is the actual state represented by this file.
+ State *states.State
+}
+
+func New(state *states.State, lineage string, serial uint64) *File {
+ // To make life easier on callers, we'll accept a nil state here and just
+ // allocate an empty one, which is required for this file to be successfully
+ // written out.
+ if state == nil {
+ state = states.NewState()
+ }
+
+ return &File{
+ TerraformVersion: tfversion.SemVer,
+ State: state,
+ Lineage: lineage,
+ Serial: serial,
+ }
+}
+
+// DeepCopy is a convenience method to create a new File object whose state
+// is a deep copy of the receiver's, as implemented by states.State.DeepCopy.
+func (f *File) DeepCopy() *File {
+ if f == nil {
+ return nil
+ }
+ return &File{
+ TerraformVersion: f.TerraformVersion,
+ Serial: f.Serial,
+ Lineage: f.Lineage,
+ State: f.State.DeepCopy(),
+ }
+} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go b/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go new file mode 100644 index 0000000000..4948b39b9e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go @@ -0,0 +1,40 @@ +package statefile
+
+import (
+ "bytes"
+
+ "github.com/hashicorp/terraform/states"
+)
+
+// StatesMarshalEqual returns true if and only if the two given states have
+// an identical (byte-for-byte) statefile representation.
+//
+// This function compares only the portions of the state that are persisted
+// in state files, so for example it will not return false if the only
+// differences between the two states are local values or descendent module
+// outputs.
+func StatesMarshalEqual(a, b *states.State) bool {
+ var aBuf bytes.Buffer
+ var bBuf bytes.Buffer
+
+ // nil states are not valid states, and so they can never marshal equal.
+ if a == nil || b == nil {
+ return false
+ }
+
+ // We write here some temporary files that have no header information
+ // populated, thus ensuring that we're only comparing the state itself
+ // and not any metadata.
+ err := Write(&File{State: a}, &aBuf)
+ if err != nil {
+ // Should never happen, because we're writing to an in-memory buffer
+ panic(err)
+ }
+ err = Write(&File{State: b}, &bBuf)
+ if err != nil {
+ // Should never happen, because we're writing to an in-memory buffer
+ panic(err)
+ }
+
+ return bytes.Equal(aBuf.Bytes(), bBuf.Bytes())
+} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/read.go b/vendor/github.com/hashicorp/terraform/states/statefile/read.go new file mode 100644 index 0000000000..d691c0290d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/read.go @@ -0,0 +1,209 @@ +package statefile
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+
+ version "github.com/hashicorp/go-version"
+
+ "github.com/hashicorp/terraform/tfdiags"
+ tfversion "github.com/hashicorp/terraform/version"
+)
+
+// ErrNoState is returned by Read when the state file is empty.
+var ErrNoState = errors.New("no state")
+
+// Read reads a state from the given reader.
+//
+// Legacy state format versions 1 through 3 are supported, but the result will
+// contain object attributes in the deprecated "flatmap" format and so must
+// be upgraded by the caller before use.
+//
+// If the state file is empty, the special error value ErrNoState is returned.
+// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics
+// potentially describing multiple errors.
+func Read(r io.Reader) (*File, error) {
+ // Some callers provide us a "typed nil" *os.File here, which would
+ // cause us to panic below if we tried to use it.
+ if f, ok := r.(*os.File); ok && f == nil {
+ return nil, ErrNoState
+ }
+
+ var diags tfdiags.Diagnostics
+
+ // We actually just buffer the whole thing in memory, because states are
+ // generally not huge and we need to be able to sniff for a version
+ // number before full parsing.
+ src, err := ioutil.ReadAll(r)
+ if err != nil {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ "Failed to read state file",
+ fmt.Sprintf("The state file could not be read: %s", err),
+ ))
+ return nil, diags.Err()
+ }
+
+ if len(src) == 0 {
+ return nil, ErrNoState
+ }
+
+ state, diags := readState(src)
+ if diags.HasErrors() {
+ return nil, diags.Err()
+ }
+
+ if state == nil {
+ // Should never happen
+ panic("readState returned nil state with no errors")
+ }
+
+ if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) {
+ return state, fmt.Errorf(
+ "state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state",
+ state.TerraformVersion,
+ tfversion.SemVer,
+ state.TerraformVersion,
+ )
+ }
+
+ return state, diags.Err()
+}
+
+func readState(src []byte) (*File, tfdiags.Diagnostics) {
+ var diags tfdiags.Diagnostics
+
+ if looksLikeVersion0(src) {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ unsupportedFormat,
+ "The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.",
+ ))
+ return nil, diags
+ }
+
+ version, versionDiags := sniffJSONStateVersion(src)
+ diags = diags.Append(versionDiags)
+ if versionDiags.HasErrors() {
+ return nil, diags
+ }
+
+ switch version {
+ case 0:
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ unsupportedFormat,
+ "The state file uses JSON syntax but has a version number of zero.
There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.", + )) + return nil, diags + case 1: + return readStateV1(src) + case 2: + return readStateV2(src) + case 3: + return readStateV3(src) + case 4: + return readStateV4(src) + default: + thisVersion := tfversion.SemVer.String() + creatingVersion := sniffJSONStateTerraformVersion(src) + switch { + case creatingVersion != "": + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file may have been created by a newer version of Terraform.", version, thisVersion), + )) + } + return nil, diags + } +} + +func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + type VersionSniff struct { + Version *uint64 `json:"version"` + } + var sniff VersionSniff + err := json.Unmarshal(src, &sniff) + if err != nil { + switch tErr := err.(type) { + case *json.SyntaxError: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), + )) + case *json.UnmarshalTypeError: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The version in the state file is %s. A positive whole number is required.", tErr.Value), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file could not be parsed as JSON.", + )) + } + } + + if sniff.Version == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file does not have a \"version\" attribute, which is required to identify the format version.", + )) + return 0, diags + } + + return *sniff.Version, diags +} + +// sniffJSONStateTerraformVersion attempts to sniff the Terraform version +// specification from the given state file source code. The result is either +// a version string or an empty string if no version number could be extracted. +// +// This is a best-effort function intended to produce nicer error messages. It +// should not be used for any real processing. +func sniffJSONStateTerraformVersion(src []byte) string { + type VersionSniff struct { + Version string `json:"terraform_version"` + } + var sniff VersionSniff + + err := json.Unmarshal(src, &sniff) + if err != nil { + return "" + } + + // Attempt to parse the string as a version so we won't report garbage + // as a version number. + _, err = version.NewVersion(sniff.Version) + if err != nil { + return "" + } + + return sniff.Version +} + +// unsupportedFormat is a diagnostic summary message for when the state file +// seems to not be a state file at all, or is not a supported version. +// +// Use invalidFormat instead for the subtly-different case of "this looks like +// it's intended to be a state file but it's not structured correctly". 
+const unsupportedFormat = "Unsupported state file format" + +const upgradeFailed = "State format upgrade failed" diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version0.go b/vendor/github.com/hashicorp/terraform/states/statefile/version0.go new file mode 100644 index 0000000000..9b533317bd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version0.go @@ -0,0 +1,23 @@ +package statefile + +// looksLikeVersion0 sniffs for the signature indicating a version 0 state +// file. +// +// Version 0 was the number retroactively assigned to Terraform's initial +// (unversioned) binary state file format, which was later superseded by the +// version 1 format in JSON. +// +// Version 0 is no longer supported, so this is used only to detect it and +// return a nice error to the user. +func looksLikeVersion0(src []byte) bool { + // Version 0 files begin with the magic prefix "tfstate". + const magic = "tfstate" + if len(src) < len(magic) { + // Not even long enough to have the magic prefix + return false + } + if string(src[0:len(magic)]) == magic { + return true + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1.go new file mode 100644 index 0000000000..80d711bc89 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version1.go @@ -0,0 +1,174 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform/tfdiags" +) + +func readStateV1(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV1 := &stateV1{} + err := json.Unmarshal(src, sV1) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV1(sV1) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV2, err := upgradeStateV1ToV2(sV1) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV2(sV2) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV1 is a representation of the legacy JSON state format version 1. +// +// It is only used to read version 1 JSON files prior to upgrading them to +// the current format. +type stateV1 struct { + // Version is the protocol version. "1" for a StateV1. + Version int `json:"version"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV1 `json:"remote,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV1 `json:"modules"` +} + +type remoteStateV1 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type moduleStateV1 struct { + // Path is the import path from the root module. 
Module imports are
+	// always disjoint, so the path represents a module tree.
+	Path []string `json:"path"`
+
+	// Outputs declared by the module and maintained for each module
+	// even though only the root module technically needs to be kept.
+	// This allows operators to inspect values at the boundaries.
+	Outputs map[string]string `json:"outputs"`
+
+	// Resources is a mapping of the logically named resource to
+	// the state of the resource. Each resource may actually have
+	// N instances underneath, although a user only needs to think
+	// about the 1:1 case.
+	Resources map[string]*resourceStateV1 `json:"resources"`
+
+	// Dependencies are a list of things that this module relies on
+	// existing to remain intact. For example: a module may depend
+	// on a VPC ID given by an aws_vpc resource.
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a module that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on,omitempty"`
+}
+
+type resourceStateV1 struct {
+	// This is filled in and managed by Terraform, and is the resource
+	// type itself such as "mycloud_instance". If a resource provider sets
+	// this value, it won't be persisted.
+	Type string `json:"type"`
+
+	// Dependencies are a list of things that this resource relies on
+	// existing to remain intact. For example: an AWS instance might
+	// depend on a subnet (which itself might depend on a VPC, and so
+	// on).
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a resource that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on,omitempty"`
+
+	// Primary is the current active instance for this resource.
+	// It can be replaced but only after a successful creation.
+	// This is the instance on which providers will act.
+	Primary *instanceStateV1 `json:"primary"`
+
+	// Tainted is used to track any underlying instances that
+	// have been created but are in a bad or unknown state and
+	// need to be cleaned up subsequently. In the
+	// standard case, there is only at most a single instance.
+	// However, in pathological cases, it is possible for the number
+	// of instances to accumulate.
+	Tainted []*instanceStateV1 `json:"tainted,omitempty"`
+
+	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+	// Primary is Deposed to get it out of the way for the replacement Primary to
+	// be created by Apply. If the replacement Primary creates successfully, the
+	// Deposed instance is cleaned up. If there were problems creating the
+	// replacement, the instance remains in the Deposed list so it can be
+	// destroyed in a future run. Functionally, Deposed instances are very
+	// similar to Tainted instances in that Terraform is only tracking them in
+	// order to remember to destroy them.
+	Deposed []*instanceStateV1 `json:"deposed,omitempty"`
+
+	// Provider is used when a resource is connected to a provider with an alias.
+ // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider,omitempty"` +} + +type instanceStateV1 struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes,omitempty"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. + Meta map[string]string `json:"meta,omitempty"` +} + +type ephemeralStateV1 struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go new file mode 100644 index 0000000000..0b417e1c40 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go @@ -0,0 +1,172 @@ +package statefile + +import ( + "fmt" + "log" + + "github.com/mitchellh/copystructure" +) + +// upgradeStateV1ToV2 is used to upgrade a V1 state representation +// into a V2 state representation +func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) { + log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2") + if old == nil { + return nil, nil + } + + remote, err := old.Remote.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + + modules := make([]*moduleStateV2, len(old.Modules)) + for i, module := range old.Modules { + upgraded, err := module.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + modules[i] = upgraded + } + if len(modules) == 0 { + modules = nil + } + + newState := &stateV2{ + Version: 2, + Serial: old.Serial, + Remote: remote, + Modules: modules, + } + + return newState, nil +} + +func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) { + if old == nil { + return nil, nil + } + + config, err := copystructure.Copy(old.Config) + if err != nil { + return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) + } + + return &remoteStateV2{ + Type: old.Type, + Config: config.(map[string]string), + }, nil +} + +func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) { + if old == nil { + return nil, nil + } + + pathRaw, err := copystructure.Copy(old.Path) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + path, ok := pathRaw.([]string) + if !ok { + return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") + } + if len(path) == 0 { + // We found some V1 states with a nil path. Assume root. 
+ path = []string{"root"} + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*outputStateV2) + for key, output := range old.Outputs { + outputs[key] = &outputStateV2{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*resourceStateV2) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + + return &moduleStateV2{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + deposed := make([]*instanceStateV2, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &resourceStateV2{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + meta, err := copystructure.Copy(old.Meta) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &instanceStateV2{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Meta: newMeta, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go new file mode 100644 index 0000000000..6fe2ab8ef8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go @@ -0,0 +1,209 @@ +package statefile + +import ( + "encoding/json" + "fmt" + "sync" + + "github.com/hashicorp/terraform/tfdiags" +) + +func readStateV2(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV2 := &stateV2{} + err := json.Unmarshal(src, sV2) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV2(sV2) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV3, err := upgradeStateV2ToV3(sV2) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV3(sV3) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV2 is a 
representation of the legacy JSON state format version 2.
+//
+// It is only used to read version 2 JSON files prior to upgrading them to
+// the current format.
+type stateV2 struct {
+	// Version is the state file protocol version.
+	Version int `json:"version"`
+
+	// TFVersion is the version of Terraform that wrote this state.
+	TFVersion string `json:"terraform_version,omitempty"`
+
+	// Serial is incremented on any operation that modifies
+	// the State file. It is used to detect potentially conflicting
+	// updates.
+	Serial int64 `json:"serial"`
+
+	// Lineage is set when a new, blank state is created and then
+	// never updated. This allows us to determine whether the serials
+	// of two states can be meaningfully compared.
+	// Apart from the guarantee that collisions between two lineages
+	// are very unlikely, this value is opaque and external callers
+	// should only compare lineage strings byte-for-byte for equality.
+	Lineage string `json:"lineage"`
+
+	// Remote is used to track the metadata required to
+	// pull and push state files from a remote storage endpoint.
+	Remote *remoteStateV2 `json:"remote,omitempty"`
+
+	// Backend tracks the configuration for the backend in use with
+	// this state. This is used to track any changes in the backend
+	// configuration.
+	Backend *backendStateV2 `json:"backend,omitempty"`
+
+	// Modules contains all the modules in a breadth-first order
+	Modules []*moduleStateV2 `json:"modules"`
+}
+
+type remoteStateV2 struct {
+	// Type controls the client we use for the remote state
+	Type string `json:"type"`
+
+	// Config is used to store arbitrary configuration that
+	// is type specific
+	Config map[string]string `json:"config"`
+}
+
+type outputStateV2 struct {
+	// Sensitive describes whether the output is considered sensitive,
+	// which may lead to masking the value on screen in some cases.
+	Sensitive bool `json:"sensitive"`
+	// Type describes the structure of Value. Valid values are "string",
+	// "map" and "list"
+	Type string `json:"type"`
+	// Value contains the value of the output, in the structure described
+	// by the Type field.
+	Value interface{} `json:"value"`
+
+	mu sync.Mutex
+}
+
+type moduleStateV2 struct {
+	// Path is the import path from the root module. Module imports are
+	// always disjoint, so the path represents a module tree.
+	Path []string `json:"path"`
+
+	// Locals are kept only transiently in-memory, because we can always
+	// re-compute them.
+	Locals map[string]interface{} `json:"-"`
+
+	// Outputs declared by the module and maintained for each module
+	// even though only the root module technically needs to be kept.
+	// This allows operators to inspect values at the boundaries.
+	Outputs map[string]*outputStateV2 `json:"outputs"`
+
+	// Resources is a mapping of the logically named resource to
+	// the state of the resource. Each resource may actually have
+	// N instances underneath, although a user only needs to think
+	// about the 1:1 case.
+	Resources map[string]*resourceStateV2 `json:"resources"`
+
+	// Dependencies are a list of things that this module relies on
+	// existing to remain intact. For example: a module may depend
+	// on a VPC ID given by an aws_vpc resource.
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a module that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform.
If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on"`
+}
+
+type resourceStateV2 struct {
+	// This is filled in and managed by Terraform, and is the resource
+	// type itself such as "mycloud_instance". If a resource provider sets
+	// this value, it won't be persisted.
+	Type string `json:"type"`
+
+	// Dependencies are a list of things that this resource relies on
+	// existing to remain intact. For example: an AWS instance might
+	// depend on a subnet (which itself might depend on a VPC, and so
+	// on).
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a resource that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on"`
+
+	// Primary is the current active instance for this resource.
+	// It can be replaced but only after a successful creation.
+	// This is the instance on which providers will act.
+	Primary *instanceStateV2 `json:"primary"`
+
+	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+	// Primary is Deposed to get it out of the way for the replacement Primary to
+	// be created by Apply. If the replacement Primary creates successfully, the
+	// Deposed instance is cleaned up.
+	//
+	// If there were problems creating the replacement Primary, the Deposed
+	// instance and the (now tainted) replacement Primary will be swapped so the
+	// tainted replacement will be cleaned up instead.
+	//
+	// An instance will remain in the Deposed list until it is successfully
+	// destroyed and purged.
+	Deposed []*instanceStateV2 `json:"deposed"`
+
+	// Provider is used when a resource is connected to a provider with an alias.
+	// If this string is empty, the resource is connected to the default provider,
+	// e.g. "aws_instance" goes with the "aws" provider.
+	// If the resource block contained a "provider" key, that value will be set here.
+	Provider string `json:"provider"`
+
+	mu sync.Mutex
+}
+
+type instanceStateV2 struct {
+	// A unique ID for this resource. This is opaque to Terraform
+	// and is only meant as a lookup mechanism for the providers.
+	ID string `json:"id"`
+
+	// Attributes are basic information about the resource. Any keys here
+	// are accessible in variable format within Terraform configurations:
+	// ${resourcetype.name.attribute}.
+	Attributes map[string]string `json:"attributes"`
+
+	// Meta is a simple K/V map that is persisted to the State but otherwise
+	// ignored by Terraform core. It's meant to be used for accounting by
+	// external client code. The value here must only contain Go primitives
+	// and collections.
+	Meta map[string]interface{} `json:"meta"`
+
+	// Tainted is used to mark a resource for recreation.
+ Tainted bool `json:"tainted"` +} + +type backendStateV2 struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash int `json:"hash"` // Hash of portion of configuration from config files +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go new file mode 100644 index 0000000000..2d03c07c9d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go @@ -0,0 +1,145 @@ +package statefile + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/copystructure" +) + +func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) { + if old == nil { + return (*stateV3)(nil), nil + } + + var new *stateV3 + { + copy, err := copystructure.Config{Lock: true}.Copy(old) + if err != nil { + panic(err) + } + newWrongType := copy.(*stateV2) + newRightType := (stateV3)(*newWrongType) + new = &newRightType + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. + for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. + for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. There is room for error here, so we log in each case. If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. 
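+		// An illustrative sketch of the rewrite performed below (attribute
+		// values assumed, not from the original file): for a map attribute,
+		// a v2 flatmap such as
+		//
+		//	"tags.#": "2", "tags.a": "x", "tags.b": "y"
+		//
+		// becomes, in v3,
+		//
+		//	"tags.%": "2", "tags.a": "x", "tags.b": "y"
+		//
+		// while the count key of an empty collection is deleted outright.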
+
+		oldCountKey := fmt.Sprintf("%s#", prefix)
+
+		// First, detect "obvious" maps - which have non-numeric keys (mostly).
+		hasNonNumericKeys := false
+		for _, key := range actualKeysMatching {
+			if _, err := strconv.Atoi(key); err != nil {
+				hasNonNumericKeys = true
+			}
+		}
+		if hasNonNumericKeys {
+			newCountKey := fmt.Sprintf("%s%%", prefix)
+
+			instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
+			delete(instanceState.Attributes, oldCountKey)
+			log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
+				strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
+		}
+
+		// Now detect empty collections and remove them from state.
+		if len(actualKeysMatching) == 0 {
+			delete(instanceState.Attributes, oldCountKey)
+			log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
+				strings.TrimSuffix(prefix, "."))
+		}
+	}
+
+	return nil
+}
+
+// uniqueSortedStrings removes duplicates from a slice of strings and returns
+// a sorted slice of the unique strings.
+func uniqueSortedStrings(input []string) []string {
+	uniquemap := make(map[string]struct{})
+	for _, str := range input {
+		uniquemap[str] = struct{}{}
+	}
+
+	output := make([]string, len(uniquemap))
+
+	i := 0
+	for key := range uniquemap {
+		output[i] = key
+		i++
+	}
+
+	sort.Strings(output)
+	return output
+}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3.go
new file mode 100644
index 0000000000..ab6414b0a7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version3.go
@@ -0,0 +1,50 @@
+package statefile
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+func readStateV3(src []byte) (*File, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	sV3 := &stateV3{}
+	err := json.Unmarshal(src, sV3)
+	if err != nil {
+		diags = diags.Append(jsonUnmarshalDiags(err))
+		return nil, diags
+	}
+
+	file, prepDiags := prepareStateV3(sV3)
+	diags = diags.Append(prepDiags)
+	return file, diags
+}
+
+func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	sV4, err := upgradeStateV3ToV4(sV3)
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			upgradeFailed,
+			fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err),
+		))
+		return nil, diags
+	}
+
+	file, prepDiags := prepareStateV4(sV4)
+	diags = diags.Append(prepDiags)
+	return file, diags
+}
+
+// stateV3 is a representation of the legacy JSON state format version 3.
+//
+// It is only used to read version 3 JSON files prior to upgrading them to
+// the current format.
+//
+// The differences between version 2 and version 3 are only in the data and
+// not in the structure, so stateV3 actually shares the same structs as
+// stateV2. Type stateV3 represents that the data within is formatted as
+// expected by the V3 format, rather than the V2 format.
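+//
+// Because the structure is shared, the v2-to-v3 upgrade above converts
+// between the two with a plain type conversion, e.g. (hedged sketch):
+//
+//	v3 := (stateV3)(*v2)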
+type stateV3 stateV2
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
new file mode 100644
index 0000000000..7ec1c94659
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
@@ -0,0 +1,415 @@
+package statefile
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/zclconf/go-cty/cty"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {
+
+	if old.Serial < 0 {
+		// The new format is using uint64 here, which should be fine for any
+		// real state (we only used positive integers in practice) but we'll
+		// catch this explicitly here to avoid weird behavior if a state file
+		// has been tampered with in some way.
+		return nil, fmt.Errorf("state has serial less than zero, which is invalid")
+	}
+
+	new := &stateV4{
+		TerraformVersion: old.TFVersion,
+		Serial:           uint64(old.Serial),
+		Lineage:          old.Lineage,
+		RootOutputs:      map[string]outputStateV4{},
+		Resources:        []resourceStateV4{},
+	}
+
+	if new.TerraformVersion == "" {
+		// Older formats considered this to be optional, but now it's required
+		// and so we'll stub it out with something that's definitely older
+		// than the version that really created this state.
+		new.TerraformVersion = "0.0.0"
+	}
+
+	for _, msOld := range old.Modules {
+		if len(msOld.Path) < 1 || msOld.Path[0] != "root" {
+			return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path)
+		}
+
+		// Convert legacy-style module address into our newer address type.
+		// Since these old formats are only generated by versions of Terraform
+		// that don't support count and for_each on modules, we can just assume
+		// all of the modules are unkeyed.
+		moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1)
+		for i, name := range msOld.Path[1:] {
+			moduleAddr[i] = addrs.ModuleInstanceStep{
+				Name:        name,
+				InstanceKey: addrs.NoKey,
+			}
+		}
+
+		// In a v3 state file, a "resource state" is actually an instance
+		// state, so we need to fill in a missing level of hierarchy here
+		// by lazily creating resource states as we encounter them.
+		// We'll track them in here, keyed on the string representation of
+		// the resource address.
+		resourceStates := map[string]*resourceStateV4{}
+
+		for legacyAddr, rsOld := range msOld.Resources {
+			instAddr, err := parseLegacyResourceAddress(legacyAddr)
+			if err != nil {
+				return nil, err
+			}
+
+			resAddr := instAddr.Resource
+			rs, exists := resourceStates[resAddr.String()]
+			if !exists {
+				var modeStr string
+				switch resAddr.Mode {
+				case addrs.ManagedResourceMode:
+					modeStr = "managed"
+				case addrs.DataResourceMode:
+					modeStr = "data"
+				default:
+					return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode", resAddr)
+				}
+
+				// In state versions prior to 4 we allowed each instance of a
+				// resource to have its own provider configuration address,
+				// which makes no real sense in practice because providers
+				// are associated with resources in the configuration. We
+				// elevate that to the resource level during this upgrade,
+				// implicitly taking the provider address of the first instance
+				// we encounter for each resource. While this is lossy in
+				// theory, in practice there is no reason for these values to
+				// differ between instances.
+ var providerAddr addrs.AbsProviderConfig + oldProviderAddr := rsOld.Provider + if strings.Contains(oldProviderAddr, "provider.") { + // Smells like a new-style provider address, but we'll test it. + var diags tfdiags.Diagnostics + providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr) + if diags.HasErrors() { + return nil, diags.Err() + } + } else { + // Smells like an old-style module-local provider address, + // which we'll need to migrate. We'll assume it's referring + // to the same module the resource is in, which might be + // incorrect but it'll get fixed up next time any updates + // are made to an instance. + if oldProviderAddr != "" { + localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr) + if diags.HasErrors() { + return nil, diags.Err() + } + providerAddr = localAddr.Absolute(moduleAddr) + } else { + providerAddr = resAddr.DefaultProviderConfig().Absolute(moduleAddr) + } + } + + rs = &resourceStateV4{ + Module: moduleAddr.String(), + Mode: modeStr, + Type: resAddr.Type, + Name: resAddr.Name, + Instances: []instanceObjectStateV4{}, + ProviderConfig: providerAddr.String(), + } + resourceStates[resAddr.String()] = rs + } + + // Now we'll deal with the instance itself, which may either be + // the first instance in a resource we just created or an additional + // instance for a resource added on a prior loop. + instKey := instAddr.Key + if isOld := rsOld.Primary; isOld != nil { + isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed) + if err != nil { + return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err) + } + rs.Instances = append(rs.Instances, *isNew) + } + for i, isOld := range rsOld.Deposed { + // When we migrate old instances we'll use sequential deposed + // keys just so that the upgrade result is deterministic. New + // deposed keys allocated moving forward will be pseudorandomly + // selected, but we check for collisions and so these + // non-random ones won't hurt. + deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1)) + isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey) + if err != nil { + return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err) + } + rs.Instances = append(rs.Instances, *isNew) + } + + if instKey != addrs.NoKey && rs.EachMode == "" { + rs.EachMode = "list" + } + } + + for _, rs := range resourceStates { + new.Resources = append(new.Resources, *rs) + } + + if len(msOld.Path) == 1 && msOld.Path[0] == "root" { + // We'll migrate the outputs for this module too, then. + for name, oldOS := range msOld.Outputs { + newOS := outputStateV4{ + Sensitive: oldOS.Sensitive, + } + + valRaw := oldOS.Value + valSrc, err := json.Marshal(valRaw) + if err != nil { + // Should never happen, because this value came from JSON + // in the first place and so we're just round-tripping here. + return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err) + } + + // The "type" field in state V2 wasn't really that useful + // since it was only able to capture string vs. list vs. map. + // For this reason, during upgrade we'll just discard it + // altogether and use cty's idea of the implied type of + // turning our old value into JSON. + ty, err := ctyjson.ImpliedType(valSrc) + if err != nil { + // REALLY should never happen, because we literally just + // encoded this as JSON above! 
+				return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err)
+			}
+
+			// ImpliedType tends to produce structural types, but since older
+			// versions of Terraform didn't support those a collection type
+			// is probably what was intended, so we'll see if we can
+			// interpret our value as one.
+			ty = simplifyImpliedValueType(ty)
+
+			tySrc, err := ctyjson.MarshalType(ty)
+			if err != nil {
+				return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err)
+			}
+
+			newOS.ValueRaw = json.RawMessage(valSrc)
+			newOS.ValueTypeRaw = json.RawMessage(tySrc)
+
+			new.RootOutputs[name] = newOS
+		}
+	}
+
+	new.normalize()
+
+	return new, nil
+}
+
+func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) {
+
+	// Schema versions were, in prior formats, a private concern of the provider
+	// SDK, and not a first-class concept in the state format. Here we're
+	// sniffing for the pre-0.12 SDK's way of representing schema versions
+	// and promoting it to our first-class field if we find it. We'll ignore
+	// it if it doesn't look like what the SDK would've written. If this
+	// sniffing fails then we'll assume schema version 0.
+	var schemaVersion uint64
+	migratedSchemaVersion := false
+	if raw, exists := isOld.Meta["schema_version"]; exists {
+		switch tv := raw.(type) {
+		case string:
+			v, err := strconv.ParseUint(tv, 10, 64)
+			if err == nil {
+				schemaVersion = v
+				migratedSchemaVersion = true
+			}
+		case int:
+			schemaVersion = uint64(tv)
+			migratedSchemaVersion = true
+		case float64:
+			schemaVersion = uint64(tv)
+			migratedSchemaVersion = true
+		}
+	}
+
+	private := map[string]interface{}{}
+	for k, v := range isOld.Meta {
+		if k == "schema_version" && migratedSchemaVersion {
+			// We're gonna promote this into our first-class schema version field
+			continue
+		}
+		private[k] = v
+	}
+	var privateJSON []byte
+	if len(private) != 0 {
+		var err error
+		privateJSON, err = json.Marshal(private)
+		if err != nil {
+			// This shouldn't happen, because the Meta values all came from JSON
+			// originally anyway.
+			return nil, fmt.Errorf("cannot serialize private instance object data: %s", err)
+		}
+	}
+
+	var status string
+	if isOld.Tainted {
+		status = "tainted"
+	}
+
+	var instKeyRaw interface{}
+	switch tk := instKey.(type) {
+	case addrs.IntKey:
+		instKeyRaw = int(tk)
+	case addrs.StringKey:
+		instKeyRaw = string(tk)
+	default:
+		if instKey != addrs.NoKey {
+			return nil, fmt.Errorf("unsupported instance key: %#v", instKey)
+		}
+	}
+
+	var attributes map[string]string
+	if isOld.Attributes != nil {
+		attributes = make(map[string]string, len(isOld.Attributes))
+		for k, v := range isOld.Attributes {
+			attributes[k] = v
+		}
+	}
+	if isOld.ID != "" {
+		// As a special case, if we don't already have an "id" attribute and
+		// yet there's a non-empty first-class ID on the old object then we'll
+		// create a synthetic id attribute to avoid losing that first-class id.
+		// In practice this generally arises only in tests where state literals
+		// are hand-written in a non-standard way; real code prior to 0.12
+		// would always force the first-class ID to be copied into the
+		// id attribute before storing.
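+		// An illustrative example (values assumed, not from the original
+		// file): an old object with
+		//
+		//	ID: "i-abc123", Attributes: {"ami": "ami-1"}
+		//
+		// gains "id": "i-abc123" in its flatmap attributes here.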
+		if attributes == nil {
+			attributes = make(map[string]string, len(isOld.Attributes))
+		}
+		if idVal := attributes["id"]; idVal == "" {
+			attributes["id"] = isOld.ID
+		}
+	}
+
+	dependencies := make([]string, len(rsOld.Dependencies))
+	for i, v := range rsOld.Dependencies {
+		dependencies[i] = strings.TrimSuffix(v, ".*")
+	}
+
+	return &instanceObjectStateV4{
+		IndexKey:       instKeyRaw,
+		Status:         status,
+		Deposed:        string(deposedKey),
+		AttributesFlat: attributes,
+		Dependencies:   dependencies,
+		SchemaVersion:  schemaVersion,
+		PrivateRaw:     privateJSON,
+	}, nil
+}
+
+// parseLegacyResourceAddress parses the different identifier format used by
+// state formats before version 4, like "instance.name.0".
+func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) {
+	var ret addrs.ResourceInstance
+
+	// Split based on ".". Every resource address should have at least two
+	// elements (type and name).
+	parts := strings.Split(s, ".")
+	if len(parts) < 2 || len(parts) > 4 {
+		return ret, fmt.Errorf("invalid internal resource address format: %s", s)
+	}
+
+	// Data resource if we have at least 3 parts and the first one is data
+	ret.Resource.Mode = addrs.ManagedResourceMode
+	if len(parts) > 2 && parts[0] == "data" {
+		ret.Resource.Mode = addrs.DataResourceMode
+		parts = parts[1:]
+	}
+
+	// If we're not a data resource and we have more than 3, then it is an error
+	if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode {
+		return ret, fmt.Errorf("invalid internal resource address format: %s", s)
+	}
+
+	// Build the parts of the resource address that are guaranteed to exist
+	ret.Resource.Type = parts[0]
+	ret.Resource.Name = parts[1]
+	ret.Key = addrs.NoKey
+
+	// If we have more parts, then we have an index. Parse that.
+	if len(parts) > 2 {
+		idx, err := strconv.ParseInt(parts[2], 0, 0)
+		if err != nil {
+			return ret, fmt.Errorf("error parsing resource address %q: %s", s, err)
+		}
+
+		ret.Key = addrs.IntKey(idx)
+	}
+
+	return ret, nil
+}
+
+// simplifyImpliedValueType attempts to heuristically simplify a value type
+// derived from a legacy stored output value into something simpler that
+// is closer to what would've fitted into the pre-v0.12 value type system.
+func simplifyImpliedValueType(ty cty.Type) cty.Type {
+	switch {
+	case ty.IsTupleType():
+		// If all of the element types are the same then we'll make this
+		// a list instead. This is very likely to be true, since prior versions
+		// of Terraform did not officially support mixed-type collections.
+
+		if ty.Equals(cty.EmptyTuple) {
+			// Don't know what the element type would be, then.
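+			// (Illustrative, assuming go-cty's constructors: cty.EmptyTuple
+			// carries no element type to recover, while a
+			// cty.Tuple([]cty.Type{cty.String, cty.String}) would simplify
+			// to cty.List(cty.String) below.)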
+			return ty
+		}
+
+		etys := ty.TupleElementTypes()
+		ety := etys[0]
+		for _, other := range etys[1:] {
+			if !other.Equals(ety) {
+				// inconsistent types
+				return ty
+			}
+		}
+		ety = simplifyImpliedValueType(ety)
+		return cty.List(ety)
+
+	case ty.IsObjectType():
+		// If all of the attribute types are the same then we'll make this
+		// a map instead. This is very likely to be true, since prior versions
+		// of Terraform did not officially support mixed-type collections.
+
+		if ty.Equals(cty.EmptyObject) {
+			// Don't know what the element type would be, then.
+			return ty
+		}

+		atys := ty.AttributeTypes()
+		var ety cty.Type
+		for _, other := range atys {
+			if ety == cty.NilType {
+				ety = other
+				continue
+			}
+			if !other.Equals(ety) {
+				// inconsistent types
+				return ty
+			}
+		}
+		ety = simplifyImpliedValueType(ety)
+		return cty.Map(ety)
+
+	default:
+		// No other normalizations are possible
+		return ty
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version4.go b/vendor/github.com/hashicorp/terraform/states/statefile/version4.go
new file mode 100644
index 0000000000..ee8b652368
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version4.go
@@ -0,0 +1,604 @@
+package statefile
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"sort"
+
+	version "github.com/hashicorp/go-version"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+func readStateV4(src []byte) (*File, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	sV4 := &stateV4{}
+	err := json.Unmarshal(src, sV4)
+	if err != nil {
+		diags = diags.Append(jsonUnmarshalDiags(err))
+		return nil, diags
+	}
+
+	file, prepDiags := prepareStateV4(sV4)
+	diags = diags.Append(prepDiags)
+	return file, diags
+}
+
+func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	var tfVersion *version.Version
+	if sV4.TerraformVersion != "" {
+		var err error
+		tfVersion, err = version.NewVersion(sV4.TerraformVersion)
+		if err != nil {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Invalid Terraform version string",
+				fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion),
+			))
+		}
+	}
+
+	file := &File{
+		TerraformVersion: tfVersion,
+		Serial:           sV4.Serial,
+		Lineage:          sV4.Lineage,
+	}
+
+	state := states.NewState()
+
+	for _, rsV4 := range sV4.Resources {
+		rAddr := addrs.Resource{
+			Type: rsV4.Type,
+			Name: rsV4.Name,
+		}
+		switch rsV4.Mode {
+		case "managed":
+			rAddr.Mode = addrs.ManagedResourceMode
+		case "data":
+			rAddr.Mode = addrs.DataResourceMode
+		default:
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Invalid resource mode in state",
+				fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name),
+			))
+			continue
+		}
+
+		moduleAddr := addrs.RootModuleInstance
+		if rsV4.Module != "" {
+			var addrDiags tfdiags.Diagnostics
+			moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module)
+			diags = diags.Append(addrDiags)
+			if addrDiags.HasErrors() {
+				continue
+			}
+		}
+
+		providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig)
+		diags = diags.Append(addrDiags)
+		if addrDiags.HasErrors() {
+			continue
+		}
+
+		var eachMode states.EachMode
+		switch rsV4.EachMode {
+		case "":
+			eachMode = states.NoEach
+		case "list":
+			eachMode = states.EachList
+		case "map":
+			eachMode = states.EachMap
+		default:
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Invalid resource metadata in state",
+				fmt.Sprintf("Resource %s has invalid \"each\" value %q in state.", rAddr.Absolute(moduleAddr), rsV4.EachMode),
+			))
+			continue
+		}
+
+		ms := state.EnsureModule(moduleAddr)
+
+		// Ensure the resource container object is present in the state.
+		ms.SetResourceMeta(rAddr, eachMode, providerAddr)
+
+		for _, isV4 := range rsV4.Instances {
+			keyRaw := isV4.IndexKey
+			var key addrs.InstanceKey
+			switch tk := keyRaw.(type) {
+			case int:
+				key = addrs.IntKey(tk)
+			case float64:
+				// Since JSON only has one number type, reading from encoding/json
+				// gives us a float64 here even if the number is whole.
+				// float64 has a smaller integer range than int, but in practice
+				// we rarely have more than a few tens of instances and so
+				// it's unlikely that we'll exhaust the 53 bits of integer
+				// precision in a float64.
+				key = addrs.IntKey(int(tk))
+			case string:
+				key = addrs.StringKey(tk)
+			default:
+				if keyRaw != nil {
+					diags = diags.Append(tfdiags.Sourceless(
+						tfdiags.Error,
+						"Invalid resource instance metadata in state",
+						fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw),
+					))
+					continue
+				}
+				key = addrs.NoKey
+			}
+
+			instAddr := rAddr.Instance(key)
+
+			obj := &states.ResourceInstanceObjectSrc{
+				SchemaVersion: isV4.SchemaVersion,
+			}
+
+			{
+				// Instance attributes
+				switch {
+				case isV4.AttributesRaw != nil:
+					obj.AttrsJSON = isV4.AttributesRaw
+				case isV4.AttributesFlat != nil:
+					obj.AttrsFlat = isV4.AttributesFlat
+				default:
+					// This is odd, but we'll accept it and just treat the
+					// object as being empty. In practice this should arise
+					// only from the contrived sort of state objects we tend
+					// to hand-write inline in tests.
+					obj.AttrsJSON = []byte{'{', '}'}
+				}
+			}
+
+			{
+				// Status
+				raw := isV4.Status
+				switch raw {
+				case "":
+					obj.Status = states.ObjectReady
+				case "tainted":
+					obj.Status = states.ObjectTainted
+				default:
+					diags = diags.Append(tfdiags.Sourceless(
+						tfdiags.Error,
+						"Invalid resource instance metadata in state",
+						fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw),
+					))
+					continue
+				}
+			}
+
+			if raw := isV4.PrivateRaw; len(raw) > 0 {
+				obj.Private = raw
+			}
+
+			{
+				depsRaw := isV4.Dependencies
+				deps := make([]addrs.Referenceable, 0, len(depsRaw))
+				for _, depRaw := range depsRaw {
+					ref, refDiags := addrs.ParseRefStr(depRaw)
+					diags = diags.Append(refDiags)
+					if refDiags.HasErrors() {
+						continue
+					}
+					if len(ref.Remaining) != 0 {
+						diags = diags.Append(tfdiags.Sourceless(
+							tfdiags.Error,
+							"Invalid resource instance metadata in state",
+							fmt.Sprintf("Instance %s declares dependency on %q, which is not a reference to a dependable object.", instAddr.Absolute(moduleAddr), depRaw),
+						))
+					}
+					if ref.Subject == nil {
+						// Should never happen
+						panic(fmt.Sprintf("parsing dependency %q for instance %s returned a nil address", depRaw, instAddr.Absolute(moduleAddr)))
+					}
+					deps = append(deps, ref.Subject)
+				}
+				obj.Dependencies = deps
+			}
+
+			switch {
+			case isV4.Deposed != "":
+				dk := states.DeposedKey(isV4.Deposed)
+				if len(dk) != 8 {
+					diags = diags.Append(tfdiags.Sourceless(
+						tfdiags.Error,
+						"Invalid resource instance metadata in state",
+						fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed),
+					))
+					continue
+				}
+				is := ms.ResourceInstance(instAddr)
+				if is.HasDeposed(dk) {
+					diags = diags.Append(tfdiags.Sourceless(
+						tfdiags.Error,
+						"Duplicate resource instance in state",
+						fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk),
+					))
+					continue
+				}
+
+				ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr)
+			default:
+				is := ms.ResourceInstance(instAddr)
+				if
is.HasCurrent() {
+					diags = diags.Append(tfdiags.Sourceless(
+						tfdiags.Error,
+						"Duplicate resource instance in state",
+						fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)),
+					))
+					continue
+				}
+
+				ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr)
+			}
+		}
+
+		// We repeat this after creating the instances because
+		// SetResourceInstanceCurrent automatically resets this metadata based
+		// on the incoming objects. That behavior is useful when we're making
+		// piecemeal updates to the state during an apply, but when we're
+		// reading the state file we want to reflect its contents exactly.
+		ms.SetResourceMeta(rAddr, eachMode, providerAddr)
+	}
+
+	// The root module is special in that we persist its attributes and thus
+	// need to reload them now. (For descendant modules we just re-calculate
+	// them based on the latest configuration on each run.)
+	{
+		rootModule := state.RootModule()
+		for name, fos := range sV4.RootOutputs {
+			os := &states.OutputValue{}
+			os.Sensitive = fos.Sensitive
+
+			ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw))
+			if err != nil {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Invalid output value type in state",
+					fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err),
+				))
+				continue
+			}
+
+			val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty)
+			if err != nil {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Invalid output value saved in state",
+					fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err),
+				))
+				continue
+			}
+
+			os.Value = val
+			rootModule.OutputValues[name] = os
+		}
+	}
+
+	file.State = state
+	return file, diags
+}
+
+func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics {
+	// Here we'll convert back from the "File" representation to our
+	// stateV4 struct representation and write that.
+	//
+	// While we support legacy state formats for reading, we only support the
+	// latest for writing and so if a V5 is added in future then this function
+	// should be deleted and replaced with a writeStateV5, even though the
+	// read/prepare V4 functions above would stick around.
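+	// (An illustrative round-trip under this package's API, hedged: a state
+	// read by Read is upgraded in memory, so writing it back always emits a
+	// "version": 4 document:
+	//
+	//	f, _ := Read(bytes.NewReader(oldSrc)) // accepts v1 through v4
+	//	var buf bytes.Buffer
+	//	_ = Write(f, &buf) // always writes v4
+	// )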
+
+	var diags tfdiags.Diagnostics
+	if file == nil || file.State == nil {
+		panic("attempt to write nil state to file")
+	}
+
+	var terraformVersion string
+	if file.TerraformVersion != nil {
+		terraformVersion = file.TerraformVersion.String()
+	}
+
+	sV4 := &stateV4{
+		TerraformVersion: terraformVersion,
+		Serial:           file.Serial,
+		Lineage:          file.Lineage,
+		RootOutputs:      map[string]outputStateV4{},
+		Resources:        []resourceStateV4{},
+	}
+
+	for name, os := range file.State.RootModule().OutputValues {
+		src, err := ctyjson.Marshal(os.Value, os.Value.Type())
+		if err != nil {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Failed to serialize output value in state",
+				fmt.Sprintf("An error occurred while serializing output value %q: %s.", name, err),
+			))
+			continue
+		}
+
+		typeSrc, err := ctyjson.MarshalType(os.Value.Type())
+		if err != nil {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Failed to serialize output value in state",
+				fmt.Sprintf("An error occurred while serializing the type of output value %q: %s.", name, err),
+			))
+			continue
+		}
+
+		sV4.RootOutputs[name] = outputStateV4{
+			Sensitive:    os.Sensitive,
+			ValueRaw:     json.RawMessage(src),
+			ValueTypeRaw: json.RawMessage(typeSrc),
+		}
+	}
+
+	for _, ms := range file.State.Modules {
+		moduleAddr := ms.Addr
+		for _, rs := range ms.Resources {
+			resourceAddr := rs.Addr
+
+			var mode string
+			switch resourceAddr.Mode {
+			case addrs.ManagedResourceMode:
+				mode = "managed"
+			case addrs.DataResourceMode:
+				mode = "data"
+			default:
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Failed to serialize resource in state",
+					fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode),
+				))
+				continue
+			}
+
+			var eachMode string
+			switch rs.EachMode {
+			case states.NoEach:
+				eachMode = ""
+			case states.EachList:
+				eachMode = "list"
+			case states.EachMap:
+				eachMode = "map"
+			default:
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Failed to serialize resource in state",
+					fmt.Sprintf("Resource %s has \"each\" mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), rs.EachMode),
+				))
+				continue
+			}
+
+			sV4.Resources = append(sV4.Resources, resourceStateV4{
+				Module:         moduleAddr.String(),
+				Mode:           mode,
+				Type:           resourceAddr.Type,
+				Name:           resourceAddr.Name,
+				EachMode:       eachMode,
+				ProviderConfig: rs.ProviderConfig.String(),
+				Instances:      []instanceObjectStateV4{},
+			})
+			rsV4 := &(sV4.Resources[len(sV4.Resources)-1])
+
+			for key, is := range rs.Instances {
+				if is.HasCurrent() {
+					var objDiags tfdiags.Diagnostics
+					rsV4.Instances, objDiags = appendInstanceObjectStateV4(
+						rs, is, key, is.Current, states.NotDeposed,
+						rsV4.Instances,
+					)
+					diags = diags.Append(objDiags)
+				}
+				for dk, obj := range is.Deposed {
+					var objDiags tfdiags.Diagnostics
+					rsV4.Instances, objDiags = appendInstanceObjectStateV4(
+						rs, is, key, obj, dk,
+						rsV4.Instances,
+					)
+					diags = diags.Append(objDiags)
+				}
+			}
+		}
+	}
+
+	sV4.normalize()
+
+	src, err := json.MarshalIndent(sV4, "", "  ")
+	if err != nil {
+		// Shouldn't happen if we do our conversion to *stateV4 correctly above.
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Failed to serialize state",
+			fmt.Sprintf("An error occurred while serializing the state to save it.
This is a bug in Terraform and should be reported: %s.", err),
+		))
+		return diags
+	}
+	src = append(src, '\n')
+
+	_, err = w.Write(src)
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Failed to write state",
+			fmt.Sprintf("An error occurred while writing the serialized state: %s.", err),
+		))
+		return diags
+	}
+
+	return diags
+}
+
+func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	var status string
+	switch obj.Status {
+	case states.ObjectReady:
+		status = ""
+	case states.ObjectTainted:
+		status = "tainted"
+	default:
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Failed to serialize resource instance in state",
+			fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status),
+		))
+	}
+
+	var privateRaw []byte
+	if len(obj.Private) > 0 {
+		privateRaw = obj.Private
+	}
+
+	deps := make([]string, len(obj.Dependencies))
+	for i, depAddr := range obj.Dependencies {
+		deps[i] = depAddr.String()
+	}
+
+	var rawKey interface{}
+	switch tk := key.(type) {
+	case addrs.IntKey:
+		rawKey = int(tk)
+	case addrs.StringKey:
+		rawKey = string(tk)
+	default:
+		if key != addrs.NoKey {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Failed to serialize resource instance in state",
+				fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key),
+			))
+		}
+	}
+
+	return append(isV4s, instanceObjectStateV4{
+		IndexKey:       rawKey,
+		Deposed:        string(deposed),
+		Status:         status,
+		SchemaVersion:  obj.SchemaVersion,
+		AttributesFlat: obj.AttrsFlat,
+		AttributesRaw:  obj.AttrsJSON,
+		PrivateRaw:     privateRaw,
+		Dependencies:   deps,
+	}), diags
+}
+
+type stateV4 struct {
+	Version          stateVersionV4           `json:"version"`
+	TerraformVersion string                   `json:"terraform_version"`
+	Serial           uint64                   `json:"serial"`
+	Lineage          string                   `json:"lineage"`
+	RootOutputs      map[string]outputStateV4 `json:"outputs"`
+	Resources        []resourceStateV4        `json:"resources"`
+}
+
+// normalize makes some in-place changes to normalize the way items are
+// stored to ensure that two functionally-equivalent states will be stored
+// identically.
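+//
+// For example (an illustrative note, not from the original comments): two
+// states differing only in the order of their resources marshal to
+// byte-identical JSON after normalize, which is what the bytes.Equal
+// comparison of two Write outputs at the top of this package relies on.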
+func (s *stateV4) normalize() { + sort.Stable(sortResourcesV4(s.Resources)) + for _, rs := range s.Resources { + sort.Stable(sortInstancesV4(rs.Instances)) + } +} + +type outputStateV4 struct { + ValueRaw json.RawMessage `json:"value"` + ValueTypeRaw json.RawMessage `json:"type"` + Sensitive bool `json:"sensitive,omitempty"` +} + +type resourceStateV4 struct { + Module string `json:"module,omitempty"` + Mode string `json:"mode"` + Type string `json:"type"` + Name string `json:"name"` + EachMode string `json:"each,omitempty"` + ProviderConfig string `json:"provider"` + Instances []instanceObjectStateV4 `json:"instances"` +} + +type instanceObjectStateV4 struct { + IndexKey interface{} `json:"index_key,omitempty"` + Status string `json:"status,omitempty"` + Deposed string `json:"deposed,omitempty"` + + SchemaVersion uint64 `json:"schema_version"` + AttributesRaw json.RawMessage `json:"attributes,omitempty"` + AttributesFlat map[string]string `json:"attributes_flat,omitempty"` + + PrivateRaw []byte `json:"private,omitempty"` + + Dependencies []string `json:"depends_on,omitempty"` +} + +// stateVersionV4 is a weird special type we use to produce our hard-coded +// "version": 4 in the JSON serialization. +type stateVersionV4 struct{} + +func (sv stateVersionV4) MarshalJSON() ([]byte, error) { + return []byte{'4'}, nil +} + +func (sv stateVersionV4) UnmarshalJSON([]byte) error { + // Nothing to do: we already know we're version 4 + return nil +} + +type sortResourcesV4 []resourceStateV4 + +func (sr sortResourcesV4) Len() int { return len(sr) } +func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] } +func (sr sortResourcesV4) Less(i, j int) bool { + switch { + case sr[i].Mode != sr[j].Mode: + return sr[i].Mode < sr[j].Mode + case sr[i].Type != sr[j].Type: + return sr[i].Type < sr[j].Type + case sr[i].Name != sr[j].Name: + return sr[i].Name < sr[j].Name + default: + return false + } +} + +type sortInstancesV4 []instanceObjectStateV4 + +func (si sortInstancesV4) Len() int { return len(si) } +func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] } +func (si sortInstancesV4) Less(i, j int) bool { + ki := si[i].IndexKey + kj := si[j].IndexKey + if ki != kj { + if (ki == nil) != (kj == nil) { + return ki == nil + } + if kii, isInt := ki.(int); isInt { + if kji, isInt := kj.(int); isInt { + return kii < kji + } + return true + } + if kis, isStr := ki.(string); isStr { + if kjs, isStr := kj.(string); isStr { + return kis < kjs + } + return true + } + } + if si[i].Deposed != si[j].Deposed { + return si[i].Deposed < si[j].Deposed + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/write.go b/vendor/github.com/hashicorp/terraform/states/statefile/write.go new file mode 100644 index 0000000000..cff3742d11 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/write.go @@ -0,0 +1,12 @@ +package statefile + +import ( + "io" +) + +// Write writes the given state to the given writer in the current state +// serialization format. 
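+//
+// A minimal usage sketch (names illustrative, error handling elided; assumes
+// a *File obtained from Read or built by the caller):
+//
+//	var buf bytes.Buffer
+//	err := Write(f, &buf)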
+func Write(s *File, w io.Writer) error { + diags := writeStateV4(s, w) + return diags.Err() +} diff --git a/vendor/github.com/hashicorp/terraform/states/sync.go b/vendor/github.com/hashicorp/terraform/states/sync.go new file mode 100644 index 0000000000..a37744673e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/sync.go @@ -0,0 +1,537 @@ +package states + +import ( + "log" + "sync" + + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" +) + +// SyncState is a wrapper around State that provides concurrency-safe access to +// various common operations that occur during a Terraform graph walk, or other +// similar concurrent contexts. +// +// When a SyncState wrapper is in use, no concurrent direct access to the +// underlying objects is permitted unless the caller first acquires an explicit +// lock, using the Lock and Unlock methods. Most callers should _not_ +// explicitly lock, and should instead use the other methods of this type that +// handle locking automatically. +// +// Since SyncState is able to safely consolidate multiple updates into a single +// atomic operation, many of its methods are at a higher level than those +// of the underlying types, and operate on the state as a whole rather than +// on individual sub-structures of the state. +// +// SyncState can only protect against races within its own methods. It cannot +// provide any guarantees about the order in which concurrent operations will +// be processed, so callers may still need to employ higher-level techniques +// for ensuring correct operation sequencing, such as building and walking +// a dependency graph. +type SyncState struct { + state *State + lock sync.RWMutex +} + +// Module returns a snapshot of the state of the module instance with the given +// address, or nil if no such module is tracked. +// +// The return value is a pointer to a copy of the module state, which the +// caller may then freely access and mutate. However, since the module state +// tends to be a large data structure with many child objects, where possible +// callers should prefer to use a more granular accessor to access a child +// module directly, and thus reduce the amount of copying required. +func (s *SyncState) Module(addr addrs.ModuleInstance) *Module { + s.lock.RLock() + ret := s.state.Module(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// RemoveModule removes the entire state for the given module, taking with +// it any resources associated with the module. This should generally be +// called only for modules whose resources have all been destroyed, but +// that is not enforced by this method. +func (s *SyncState) RemoveModule(addr addrs.ModuleInstance) { + s.lock.Lock() + defer s.lock.Unlock() + + s.state.RemoveModule(addr) +} + +// OutputValue returns a snapshot of the state of the output value with the +// given address, or nil if no such output value is tracked. +// +// The return value is a pointer to a copy of the output value state, which the +// caller may then freely access and mutate. +func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue { + s.lock.RLock() + ret := s.state.OutputValue(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// SetOutputValue writes a given output value into the state, overwriting +// any existing value of the same name. +// +// If the module containing the output is not yet tracked in state then it +// will be added as a side-effect.
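+//
+// A hypothetical sketch, building the address with the addrs package:
+//
+//     addr := addrs.OutputValue{Name: "ip"}.Absolute(addrs.RootModuleInstance)
+//     ss.SetOutputValue(addr, cty.StringVal("10.0.0.1"), false)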
+func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetOutputValue(addr.OutputValue.Name, value, sensitive) +} + +// RemoveOutputValue removes the stored value for the output value with the +// given address. +// +// If this results in its containing module being empty, the module will be +// pruned from the state as a side-effect. +func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.RemoveOutputValue(addr.OutputValue.Name) + s.maybePruneModule(addr.Module) +} + +// LocalValue returns the current value associated with the given local value +// address. +func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value { + s.lock.RLock() + // cty.Value is immutable, so we don't need any extra copying here. + ret := s.state.LocalValue(addr) + s.lock.RUnlock() + return ret +} + +// SetLocalValue writes a given local value into the state, overwriting +// any existing value of the same name. +// +// If the module containing the local value is not yet tracked in state then it +// will be added as a side-effect. +func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetLocalValue(addr.LocalValue.Name, value) +} + +// RemoveLocalValue removes the stored value for the local value with the +// given address. +// +// If this results in its containing module being empty, the module will be +// pruned from the state as a side-effect. +func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.RemoveLocalValue(addr.LocalValue.Name) + s.maybePruneModule(addr.Module) +} + +// Resource returns a snapshot of the state of the resource with the given +// address, or nil if no such resource is tracked. +// +// The return value is a pointer to a copy of the resource state, which the +// caller may then freely access and mutate. +func (s *SyncState) Resource(addr addrs.AbsResource) *Resource { + s.lock.RLock() + ret := s.state.Resource(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// ResourceInstance returns a snapshot of the state of the resource instance +// with the given address, or nil if no such instance is tracked. +// +// The return value is a pointer to a copy of the instance state, which the +// caller may then freely access and mutate. +func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { + s.lock.RLock() + ret := s.state.ResourceInstance(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// ResourceInstanceObject returns a snapshot of the current instance object +// of the given generation belonging to the instance with the given address, +// or nil if no such object is tracked. +// +// The return value is a pointer to a copy of the object, which the caller may +// then freely access and mutate.
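+//
+// For example (hypothetical):
+//
+//     obj := ss.ResourceInstanceObject(instAddr, states.CurrentGen)
+//     if obj != nil {
+//         // obj is a private copy; mutating it cannot affect the main state
+//     }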
+func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc { + s.lock.RLock() + defer s.lock.RUnlock() + + inst := s.state.ResourceInstance(addr) + if inst == nil { + return nil + } + return inst.GetGeneration(gen).DeepCopy() +} + +// SetResourceMeta updates the resource-level metadata for the resource at +// the given address, creating the containing module state and resource state +// as a side-effect if not already present. +func (s *SyncState) SetResourceMeta(addr addrs.AbsResource, eachMode EachMode, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceMeta(addr.Resource, eachMode, provider) +} + +// RemoveResource removes the entire state for the given resource, taking with +// it any instances associated with the resource. This should generally be +// called only for resource objects whose instances have all been destroyed, +// but that is not enforced by this method. (Use RemoveResourceIfEmpty instead +// to safely check first.) +func (s *SyncState) RemoveResource(addr addrs.AbsResource) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.RemoveResource(addr.Resource) + s.maybePruneModule(addr.Module) +} + +// RemoveResourceIfEmpty is similar to RemoveResource but first checks to +// make sure there are no instances or objects left in the resource. +// +// Returns true if the resource was removed, or false if remaining child +// objects prevented its removal. Returns true also if the resource was +// already absent, and thus no action needed to be taken. +func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return true // nothing to do + } + rs := ms.Resource(addr.Resource) + if rs == nil { + return true // nothing to do + } + if len(rs.Instances) != 0 { + // We don't check here for the possibility of instances that exist + // but don't have any objects because it's the responsibility of the + // instance-mutation methods to prune those away automatically. + return false + } + ms.RemoveResource(addr.Resource) + s.maybePruneModule(addr.Module) + return true +} + +// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a +// resource has changed from having "count" set to not set, or vice-versa, and +// so we need to rename the zeroth instance key to no key at all, or vice-versa. +// +// Set countEnabled to true if the resource has count set in its new +// configuration, or false if it does not. +// +// The state is modified in-place if necessary, moving a resource instance +// between the two addresses. The return value is true if a change was made, +// and false otherwise. +func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.AbsResource, countEnabled bool) bool { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return false + } + + relAddr := addr.Resource + rs := ms.Resource(relAddr) + if rs == nil { + return false + } + huntKey := addrs.NoKey + replaceKey := addrs.InstanceKey(addrs.IntKey(0)) + if !countEnabled { + huntKey, replaceKey = replaceKey, huntKey + } + + is, exists := rs.Instances[huntKey] + if !exists { + return false + } + + if _, exists := rs.Instances[replaceKey]; exists { + // If the replacement key also exists then we'll do nothing and keep both. 
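+		// (A hypothetical example: if both null_resource.a and
+		// null_resource.a[0] were somehow present, renaming would
+		// clobber one of them.)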
+ return false + } + + // If we get here then we need to "rename" from hunt to replace + rs.Instances[replaceKey] = is + delete(rs.Instances, huntKey) + return true +} + +// SetResourceInstanceCurrent saves the given instance object as the current +// generation of the resource instance with the given address, simultaneously +// updating the recorded provider configuration address, dependencies, and +// resource EachMode. +// +// Any existing current instance object for the given resource is overwritten. +// Set obj to nil to remove the primary generation object altogether. If there +// are no deposed objects then the instance as a whole will be removed, which +// may in turn also remove the containing module if it becomes empty. +// +// The caller must ensure that the given ResourceInstanceObject is not +// concurrently mutated during this call, but may be freely used again once +// this function returns. +// +// The provider address and "each mode" are resource-wide settings and so they +// are updated for all other instances of the same resource as a side-effect of +// this call. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then they will be added as a side-effect. +func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider) + s.maybePruneModule(addr.Module) +} + +// SetResourceInstanceDeposed saves the given instance object as a deposed +// generation of the resource instance with the given address and deposed key. +// +// Call this method only for pre-existing deposed objects that already have +// a known DeposedKey. For example, this method is useful if reloading objects +// that were persisted to a state file. To mark the current object as deposed, +// use DeposeResourceInstanceObject instead. +// +// The caller must ensure that the given ResourceInstanceObject is not +// concurrently mutated during this call, but may be freely used again once +// this function returns. +// +// The resource that contains the given instance must already exist in the +// state, or this method will panic. Use Resource to check first if its +// presence is not already guaranteed. +// +// Any existing current instance object for the given resource and deposed key +// is overwritten. Set obj to nil to remove the deposed object altogether. If +// the instance is left with no objects after this operation then it will +// be removed from its containing resource altogether. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then they will be added as a side-effect. +func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider) + s.maybePruneModule(addr.Module) +} + +// DeposeResourceInstanceObject moves the current instance object for the +// given resource instance address into the deposed set, leaving the instance +// without a current object. +// +// The return value is the newly-allocated deposed key, or NotDeposed if the +// given instance is already lacking a current object.
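+//
+// A hypothetical sketch of the replace flow this supports:
+//
+//     dk := ss.DeposeResourceInstanceObject(instAddr)
+//     if dk != states.NotDeposed {
+//         // the old object is deposed; a new current object may now be written
+//     }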
+// +// If the containing module for this resource or the resource itself are not +// already tracked in state then there cannot be a current object for the +// given instance, and so NotDeposed will be returned without modifying the +// state at all. +func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return NotDeposed + } + + return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed) +} + +// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject +// but uses a pre-allocated key. It's the caller's responsibility to ensure +// that there aren't any races to use a particular key; this method will panic +// if the given key is already in use. +func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) { + s.lock.Lock() + defer s.lock.Unlock() + + if forcedKey == NotDeposed { + // Usage error: should use DeposeResourceInstanceObject in this case + panic("DeposeResourceInstanceObjectForceKey called without forced key") + } + + ms := s.state.Module(addr.Module) + if ms == nil { + return // Nothing to do, since there can't be any current object either. + } + + ms.deposeResourceInstanceObject(addr.Resource, forcedKey) +} + +// ForgetResourceInstanceAll removes the record of all objects associated with +// the specified resource instance, if present. If not present, this is a no-op. +func (s *SyncState) ForgetResourceInstanceAll(addr addrs.AbsResourceInstance) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.ForgetResourceInstanceAll(addr.Resource) + s.maybePruneModule(addr.Module) +} + +// ForgetResourceInstanceDeposed removes the record of the deposed object with +// the given address and key, if present. If not present, this is a no-op. +func (s *SyncState) ForgetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.ForgetResourceInstanceDeposed(addr.Resource, key) + s.maybePruneModule(addr.Module) +} + +// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the +// given key on the specified resource as the current object for that instance +// if and only if that would not cause us to forget an existing current +// object for that instance. +// +// Returns true if the object was restored to current, or false if no change +// was made at all. +func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool { + s.lock.Lock() + defer s.lock.Unlock() + + if key == NotDeposed { + panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey") + } + + ms := s.state.Module(addr.Module) + if ms == nil { + // Nothing to do, since the specified deposed object cannot exist. + return false + } + + return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key) +} + +// RemovePlannedResourceInstanceObjects removes from the state any resource +// instance objects that have the status ObjectPlanned, indicating that they +// are just transient placeholders created during planning. +// +// Note that this does not restore any "ready" or "tainted" object that might +// have been present before the planned object was written.
The only real use +// for this method is in preparing the state created during a refresh walk, +// where we run the planning step for certain instances just to create enough +// information to allow correct expression evaluation within provider and +// data resource blocks. Discarding planned instances in that case is okay +// because the refresh phase only creates planned objects to stand in for +// objects that don't exist yet, and thus the planned object must have been +// absent before by definition. +func (s *SyncState) RemovePlannedResourceInstanceObjects() { + // TODO: Merge together the refresh and plan phases into a single walk, + // so we can remove the need to create this "partial plan" during refresh + // that we then need to clean up before proceeding. + + s.lock.Lock() + defer s.lock.Unlock() + + for _, ms := range s.state.Modules { + moduleAddr := ms.Addr + + for _, rs := range ms.Resources { + resAddr := rs.Addr + + for ik, is := range rs.Instances { + instAddr := resAddr.Instance(ik) + + if is.Current != nil && is.Current.Status == ObjectPlanned { + // Setting the current instance to nil removes it from the + // state altogether if there are not also deposed instances. + ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig) + } + + for dk, obj := range is.Deposed { + // Deposed objects should never be "planned", but we'll + // do this anyway for the sake of completeness. + if obj.Status == ObjectPlanned { + ms.ForgetResourceInstanceDeposed(instAddr, dk) + } + } + } + } + + // We may have deleted some objects, which means that we may have + // left a module empty, and so we must prune to preserve the invariant + // that only the root module is allowed to be empty. + s.maybePruneModule(moduleAddr) + } +} + +// Lock acquires an explicit lock on the state, allowing direct read and write +// access to the returned state object. The caller must call Unlock once +// access is no longer needed, and then immediately discard the state +// pointer. +// +// Most callers should not use this. Instead, use the concurrency-safe +// accessors and mutators provided directly on SyncState. +func (s *SyncState) Lock() *State { + s.lock.Lock() + return s.state +} + +// Unlock releases a lock previously acquired by Lock, at which point the +// caller must cease all use of the state pointer that was returned. +// +// Do not call this method except to end an explicit lock acquired by +// Lock. If a caller calls Unlock without first holding the lock, behavior +// is undefined. +func (s *SyncState) Unlock() { + s.lock.Unlock() +} + +// maybePruneModule will remove a module from the state altogether if it is +// empty, unless it's the root module which must always be present. +// +// This helper method is not concurrency-safe on its own, so must only be +// called while the caller is already holding the lock for writing. +func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) { + if addr.IsRoot() { + // We never prune the root.
+ return + } + + ms := s.state.Module(addr) + if ms == nil { + return + } + + if ms.empty() { + log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr) + s.state.RemoveModule(addr) + } +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go index 7fc49da9cb..1963cbd18a 100644 --- a/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go +++ b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go @@ -8,6 +8,7 @@ package disco import ( "encoding/json" "errors" + "fmt" "io" "io/ioutil" "log" @@ -17,28 +18,36 @@ import ( "time" cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/httpclient" "github.com/hashicorp/terraform/svchost" "github.com/hashicorp/terraform/svchost/auth" ) const ( - discoPath = "/.well-known/terraform.json" - maxRedirects = 3 // arbitrary-but-small number to prevent runaway redirect loops - discoTimeout = 11 * time.Second // arbitrary-but-small time limit to prevent UI "hangs" during discovery - maxDiscoDocBytes = 1 * 1024 * 1024 // 1MB - to prevent abusive services from using loads of our memory + // Fixed path to the discovery manifest. + discoPath = "/.well-known/terraform.json" + + // Arbitrary-but-small number to prevent runaway redirect loops. + maxRedirects = 3 + + // Arbitrary-but-small time limit to prevent UI "hangs" during discovery. + discoTimeout = 11 * time.Second + + // 1MB - to prevent abusive services from using loads of our memory. + maxDiscoDocBytes = 1 * 1024 * 1024 ) -var httpTransport = cleanhttp.DefaultPooledTransport() // overridden during tests, to skip TLS verification +// httpTransport is overridden during tests, to skip TLS verification. +var httpTransport = cleanhttp.DefaultPooledTransport() // Disco is the main type in this package, which allows discovery on given // hostnames and caches the results by hostname to avoid repeated requests // for the same information. type Disco struct { - hostCache map[svchost.Hostname]Host + hostCache map[svchost.Hostname]*Host credsSrc auth.CredentialsSource // Transport is a custom http.RoundTripper to use. - // A package default is used if this is nil. Transport http.RoundTripper } @@ -50,7 +59,11 @@ func New() *Disco { // NewWithCredentialsSource returns a new discovery object initialized with // the given credentials source. func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco { - return &Disco{credsSrc: credsSrc} + return &Disco{ + hostCache: make(map[svchost.Hostname]*Host), + credsSrc: credsSrc, + Transport: httpTransport, + } } // SetCredentialsSource provides a credentials source that will be used to @@ -64,11 +77,11 @@ func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { // CredentialsForHost returns a non-nil HostCredentials if the embedded source has // credentials available for the host, and a nil HostCredentials if it does not. 
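+// For example (a hypothetical sketch):
+//
+//     creds, err := d.CredentialsForHost(svchost.Hostname("app.terraform.io"))
+//     if err == nil && creds != nil {
+//         creds.PrepareRequest(req) // attach credentials to an outgoing request
+//     }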
-func (d *Disco) CredentialsForHost(host svchost.Hostname) (auth.HostCredentials, error) { +func (d *Disco) CredentialsForHost(hostname svchost.Hostname) (auth.HostCredentials, error) { if d.credsSrc == nil { return nil, nil } - return d.credsSrc.ForHost(host) + return d.credsSrc.ForHost(hostname) } // ForceHostServices provides a pre-defined set of services for a given @@ -81,20 +94,20 @@ func (d *Disco) CredentialsForHost(host svchost.Hostname) (auth.HostCredentials, // discovery, yielding the same results as if the given map were published // at the host's default discovery URL, though using absolute URLs is strongly // recommended to make the configured behavior more explicit. -func (d *Disco) ForceHostServices(host svchost.Hostname, services map[string]interface{}) { - if d.hostCache == nil { - d.hostCache = map[svchost.Hostname]Host{} - } +func (d *Disco) ForceHostServices(hostname svchost.Hostname, services map[string]interface{}) { if services == nil { services = map[string]interface{}{} } - d.hostCache[host] = Host{ + + d.hostCache[hostname] = &Host{ discoURL: &url.URL{ Scheme: "https", - Host: string(host), + Host: string(hostname), Path: discoPath, }, - services: services, + hostname: hostname.ForDisplay(), + services: services, + transport: d.Transport, } } @@ -104,138 +117,143 @@ func (d *Disco) ForceHostServices(host svchost.Hostname, services map[string]int // // If a given hostname supports no Terraform services at all, a non-nil but // empty Host object is returned. When giving feedback to the end user about -// such situations, we say e.g. "the host doesn't provide a module -// registry", regardless of whether that is due to that service specifically -// being absent or due to the host not providing Terraform services at all, -// since we don't wish to expose the detail of whole-host discovery to an -// end-user. -func (d *Disco) Discover(host svchost.Hostname) Host { - if d.hostCache == nil { - d.hostCache = map[svchost.Hostname]Host{} - } - if cache, cached := d.hostCache[host]; cached { - return cache - } - - ret := d.discover(host) - d.hostCache[host] = ret - return ret +// such situations, we say "host does not provide a service", +// regardless of whether that is due to that service specifically being absent +// or due to the host not providing Terraform services at all, since we don't +// wish to expose the detail of whole-host discovery to an end-user. +func (d *Disco) Discover(hostname svchost.Hostname) (*Host, error) { + if host, cached := d.hostCache[hostname]; cached { + return host, nil + } + + host, err := d.discover(hostname) + if err != nil { + return nil, err + } + d.hostCache[hostname] = host + + return host, nil } // DiscoverServiceURL is a convenience wrapper for discovery on a given // hostname and then looking up a particular service in the result. -func (d *Disco) DiscoverServiceURL(host svchost.Hostname, serviceID string) *url.URL { - return d.Discover(host).ServiceURL(serviceID) +func (d *Disco) DiscoverServiceURL(hostname svchost.Hostname, serviceID string) (*url.URL, error) { + host, err := d.Discover(hostname) + if err != nil { + return nil, err + } + return host.ServiceURL(serviceID) } // discover implements the actual discovery process, with its result cached // by the public-facing Discover method. 
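+// Callers normally go through the public wrappers; a hypothetical sketch:
+//
+//     host, err := d.Discover(svchost.Hostname("app.terraform.io"))
+//     if err == nil {
+//         u, err := host.ServiceURL("modules.v1")
+//         _, _ = u, err // base URL for the module registry, if provided
+//     }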
-func (d *Disco) discover(host svchost.Hostname) Host { +func (d *Disco) discover(hostname svchost.Hostname) (*Host, error) { discoURL := &url.URL{ Scheme: "https", - Host: host.String(), + Host: hostname.String(), Path: discoPath, } - t := d.Transport - if t == nil { - t = httpTransport - } - client := &http.Client{ - Transport: t, + Transport: d.Transport, Timeout: discoTimeout, CheckRedirect: func(req *http.Request, via []*http.Request) error { log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) if len(via) > maxRedirects { - return errors.New("too many redirects") // (this error message will never actually be seen) + return errors.New("too many redirects") // this error will never actually be seen } return nil }, } req := &http.Request{ + Header: make(http.Header), Method: "GET", URL: discoURL, } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", httpclient.UserAgentString()) - if creds, err := d.CredentialsForHost(host); err != nil { - log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err) - } else if creds != nil { - creds.PrepareRequest(req) // alters req to include credentials + creds, err := d.CredentialsForHost(hostname) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", hostname, err) } - - log.Printf("[DEBUG] Service discovery for %s at %s", host, discoURL) - - ret := Host{ - discoURL: discoURL, + if creds != nil { + // Update the request to include credentials. + creds.PrepareRequest(req) } + log.Printf("[DEBUG] Service discovery for %s at %s", hostname, discoURL) + resp, err := client.Do(req) if err != nil { - log.Printf("[WARN] Failed to request discovery document: %s", err) - return ret // empty + return nil, fmt.Errorf("Failed to request discovery document: %v", err) } defer resp.Body.Close() - if resp.StatusCode != 200 { - log.Printf("[WARN] Failed to request discovery document: %s", resp.Status) - return ret // empty + host := &Host{ + // Use the discovery URL from resp.Request in + // case the client followed any redirects. + discoURL: resp.Request.URL, + hostname: hostname.ForDisplay(), + transport: d.Transport, + } + + // Return the host without any services. + if resp.StatusCode == 404 { + return host, nil } - // If the client followed any redirects, we will have a new URL to use - // as our base for relative resolution. - ret.discoURL = resp.Request.URL + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Failed to request discovery document: %s", resp.Status) + } contentType := resp.Header.Get("Content-Type") mediaType, _, err := mime.ParseMediaType(contentType) if err != nil { - log.Printf("[WARN] Discovery URL has malformed Content-Type %q", contentType) - return ret // empty + return nil, fmt.Errorf("Discovery URL has a malformed Content-Type %q", contentType) } if mediaType != "application/json" { - log.Printf("[DEBUG] Discovery URL returned Content-Type %q, rather than application/json", mediaType) - return ret // empty + return nil, fmt.Errorf("Discovery URL returned an unsupported Content-Type %q", mediaType) } - // (this doesn't catch chunked encoding, because ContentLength is -1 in that case...) + // This doesn't catch chunked encoding, because ContentLength is -1 in that case. if resp.ContentLength > maxDiscoDocBytes { // Size limit here is not a contractual requirement and so we may // adjust it over time if we find a different limit is warranted. 
- log.Printf("[WARN] Discovery doc response is too large (got %d bytes; limit %d)", resp.ContentLength, maxDiscoDocBytes) - return ret // empty + return nil, fmt.Errorf( + "Discovery doc response is too large (got %d bytes; limit %d)", + resp.ContentLength, maxDiscoDocBytes, + ) } - // If the response is using chunked encoding then we can't predict - // its size, but we'll at least prevent reading the entire thing into - // memory. + // If the response is using chunked encoding then we can't predict its + // size, but we'll at least prevent reading the entire thing into memory. lr := io.LimitReader(resp.Body, maxDiscoDocBytes) servicesBytes, err := ioutil.ReadAll(lr) if err != nil { - log.Printf("[WARN] Error reading discovery document body: %s", err) - return ret // empty + return nil, fmt.Errorf("Error reading discovery document body: %v", err) } var services map[string]interface{} err = json.Unmarshal(servicesBytes, &services) if err != nil { - log.Printf("[WARN] Failed to decode discovery document as a JSON object: %s", err) - return ret // empty + return nil, fmt.Errorf("Failed to decode discovery document as a JSON object: %v", err) } + host.services = services - ret.services = services - return ret + return host, nil } // Forget invalidates any cached record of the given hostname. If the host // has no cache entry then this is a no-op. -func (d *Disco) Forget(host svchost.Hostname) { - delete(d.hostCache, host) +func (d *Disco) Forget(hostname svchost.Hostname) { + delete(d.hostCache, hostname) } // ForgetAll is like Forget, but for all of the hostnames that have cache entries. func (d *Disco) ForgetAll() { - d.hostCache = nil + d.hostCache = make(map[svchost.Hostname]*Host) } diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/host.go b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go index faf58220af..ab9514c4f3 100644 --- a/vendor/github.com/hashicorp/terraform/svchost/disco/host.go +++ b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go @@ -1,51 +1,264 @@ package disco import ( + "encoding/json" + "fmt" + "log" + "net/http" "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/httpclient" ) +const versionServiceID = "versions.v1" + +// Host represents a service discovered host. type Host struct { - discoURL *url.URL - services map[string]interface{} + discoURL *url.URL + hostname string + services map[string]interface{} + transport http.RoundTripper +} + +// Constraints represents the version constraints of a service. +type Constraints struct { + Service string `json:"service"` + Product string `json:"product"` + Minimum string `json:"minimum"` + Maximum string `json:"maximum"` + Excluding []string `json:"excluding"` +} + +// ErrServiceNotProvided is returned when the service is not provided. +type ErrServiceNotProvided struct { + hostname string + service string +} + +// Error returns a customized error message. +func (e *ErrServiceNotProvided) Error() string { + if e.hostname == "" { + return fmt.Sprintf("host does not provide a %s service", e.service) + } + return fmt.Sprintf("host %s does not provide a %s service", e.hostname, e.service) +} + +// ErrVersionNotSupported is returned when the version is not supported. +type ErrVersionNotSupported struct { + hostname string + service string + version string +} + +// Error returns a customized error message. 
+func (e *ErrVersionNotSupported) Error() string { + if e.hostname == "" { + return fmt.Sprintf("host does not support %s version %s", e.service, e.version) + } + return fmt.Sprintf("host %s does not support %s version %s", e.hostname, e.service, e.version) +} + +// ErrNoVersionConstraints is returned when checkpoint was disabled +// or the endpoint to query for version constraints was unavailable. +type ErrNoVersionConstraints struct { + disabled bool +} + +// Error returns a customized error message. +func (e *ErrNoVersionConstraints) Error() string { + if e.disabled { + return "checkpoint disabled" + } + return "unable to contact versions service" +} // ServiceURL returns the URL associated with the given service identifier, // which should be of the form "servicename.vN". // -// A non-nil result is always an absolute URL with a scheme of either https -// or http. -// -// If the requested service is not supported by the host, this method returns -// a nil URL. -// -// If the discovery document entry for the given service is invalid (not a URL), -// it is treated as absent, also returning a nil URL. -func (h Host) ServiceURL(id string) *url.URL { - if h.services == nil { - return nil // no services supported for an empty Host +// A non-nil result is always an absolute URL with a scheme of either HTTPS +// or HTTP. +func (h *Host) ServiceURL(id string) (*url.URL, error) { + svc, ver, err := parseServiceID(id) + if err != nil { + return nil, err + } + + // No services supported for an empty Host. + if h == nil || h.services == nil { + return nil, &ErrServiceNotProvided{service: svc} } urlStr, ok := h.services[id].(string) if !ok { - return nil + // See if we have a matching service as that would indicate + // the service is supported, but not the requested version. + for serviceID := range h.services { + if strings.HasPrefix(serviceID, svc+".") { + return nil, &ErrVersionNotSupported{ + hostname: h.hostname, + service: svc, + version: ver.Original(), + } + } + } + + // No discovered services match the requested service. + return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} } - ret, err := url.Parse(urlStr) + u, err := url.Parse(urlStr) if err != nil { - return nil + return nil, fmt.Errorf("Failed to parse service URL: %v", err) + } + + // Make relative URLs absolute using our discovery URL. + if !u.IsAbs() { + u = h.discoURL.ResolveReference(u) } - if !ret.IsAbs() { - ret = h.discoURL.ResolveReference(ret) // make absolute using our discovery doc URL + + if u.Scheme != "https" && u.Scheme != "http" { + return nil, fmt.Errorf("Service URL is using an unsupported scheme: %s", u.Scheme) } - if ret.Scheme != "https" && ret.Scheme != "http" { - return nil + if u.User != nil { + return nil, fmt.Errorf("Embedded username/password information is not permitted") } - if ret.User != nil { - // embedded username/password information is not permitted; credentials - // are handled out of band. - return nil + + // Fragment part is irrelevant, since we're not a browser. + u.Fragment = "" + + return h.discoURL.ResolveReference(u), nil +} + +// VersionConstraints returns the constraints for a given service identifier +// (which should be of the form "servicename.vN") and product. +// +// When an exact (service and version) match is found, the constraints for +// that service are returned. +// +// When the requested version is not provided but the service is, we will +// search for all alternative versions.
If multiple alternative versions +// are found, the constraints of the latest available version are returned. +// +// When a service is not provided at all an error will be returned instead. +// +// When checkpoint is disabled or when a 404 is returned after making the +// HTTP call, an ErrNoVersionConstraints error will be returned. +func (h *Host) VersionConstraints(id, product string) (*Constraints, error) { + svc, _, err := parseServiceID(id) + if err != nil { + return nil, err + } + + // Return early if checkpoint is disabled. + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { + return nil, &ErrNoVersionConstraints{disabled: true} + } + + // No services supported for an empty Host. + if h == nil || h.services == nil { + return nil, &ErrServiceNotProvided{service: svc} + } + + // Try to get the service URL for the version service and + // return early if the service isn't provided by the host. + u, err := h.ServiceURL(versionServiceID) + if err != nil { + return nil, err + } + + // Check if we have an exact (service and version) match. + if _, ok := h.services[id].(string); !ok { + // If we don't have an exact match, we search for all matching + // services and then use the service ID of the latest version. + var services []string + for serviceID := range h.services { + if strings.HasPrefix(serviceID, svc+".") { + services = append(services, serviceID) + } + } + + if len(services) == 0 { + // No discovered services match the requested service. + return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} + } + + // Set id to the latest service ID we found. + var latest *version.Version + for _, serviceID := range services { + if _, ver, err := parseServiceID(serviceID); err == nil { + if latest == nil || latest.LessThan(ver) { + id = serviceID + latest = ver + } + } + } + } + + // Set a default timeout of 1 sec for the versions request (in milliseconds) + timeout := 1000 + if v, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { + timeout = v + } + + client := &http.Client{ + Transport: h.transport, + Timeout: time.Duration(timeout) * time.Millisecond, + } + + // Prepare the service URL by setting the service and product. + v := u.Query() + v.Set("product", product) + u.Path += id + u.RawQuery = v.Encode() + + // Create a new request. + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, fmt.Errorf("Failed to create version constraints request: %v", err) + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", httpclient.UserAgentString()) + + log.Printf("[DEBUG] Retrieve version constraints for service %s and product %s", id, product) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to request version constraints: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode == 404 { + return nil, &ErrNoVersionConstraints{disabled: false} + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Failed to request version constraints: %s", resp.Status) + } + + // Parse the constraints from the response body. + result := &Constraints{} + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return nil, fmt.Errorf("Error parsing version constraints: %v", err) + } + + return result, nil +} + +func parseServiceID(id string) (string, *version.Version, error) { + parts := strings.SplitN(id, ".", 2) + if len(parts) != 2 { + return "", nil, fmt.Errorf("Invalid service ID format (i.e.
service.vN): %s", id) + } + + version, err := version.NewVersion(parts[1]) + if err != nil { + return "", nil, fmt.Errorf("Invalid service version: %v", err) } - ret.Fragment = "" // fragment part is irrelevant, since we're not a browser - return h.discoURL.ResolveReference(ret) + return parts[0], version, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go index f133cc20ad..afdba99c1c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context.go @@ -1,20 +1,26 @@ package terraform import ( + "bytes" "context" "fmt" "log" - "sort" "strings" "sync" - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/version" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/tfdiags" ) // InputMode defines what sort of input will be asked for when Input @@ -51,19 +57,18 @@ var ( // ContextOpts are the user-configurable options to create a context with // NewContext. type ContextOpts struct { - Meta *ContextMeta - Destroy bool - Diff *Diff - Hooks []Hook - Module *module.Tree - Parallelism int - State *State - StateFutureAllowed bool - ProviderResolver ResourceProviderResolver - Provisioners map[string]ResourceProvisionerFactory - Shadow bool - Targets []string - Variables map[string]interface{} + Config *configs.Config + Changes *plans.Changes + State *states.State + Targets []addrs.Targetable + Variables InputValues + Meta *ContextMeta + Destroy bool + + Hooks []Hook + Parallelism int + ProviderResolver providers.Resolver + Provisioners map[string]ProvisionerFactory // If non-nil, will apply as additional constraints on the provider // plugins that will be requested from the provider resolver. @@ -83,32 +88,25 @@ type ContextMeta struct { // Context represents all the context that Terraform needs in order to // perform operations on infrastructure. This structure is built using -// NewContext. See the documentation for that. -// -// Extra functions on Context can be found in context_*.go files. +// NewContext. type Context struct { - // Maintainer note: Anytime this struct is changed, please verify - // that newShadowContext still does the right thing. Tests should - // fail regardless but putting this note here as well. 
+ config *configs.Config + changes *plans.Changes + state *states.State + targets []addrs.Targetable + variables InputValues + meta *ContextMeta + destroy bool - components contextComponentFactory - destroy bool - diff *Diff - diffLock sync.RWMutex hooks []Hook - meta *ContextMeta - module *module.Tree + components contextComponentFactory + schemas *Schemas sh *stopHook - shadow bool - state *State - stateLock sync.RWMutex - targets []string uiInput UIInput - variables map[string]interface{} l sync.Mutex // Lock acquired during any task parallelSem Semaphore - providerInputConfig map[string]map[string]interface{} + providerInputConfig map[string]map[string]cty.Value providerSHA256s map[string][]byte runLock sync.Mutex runCond *sync.Cond @@ -117,17 +115,23 @@ type Context struct { shadowErr error } +// (additional methods on Context can be found in context_*.go files.) + // NewContext creates a new Context structure. // -// Once a Context is creator, the pointer values within ContextOpts -// should not be mutated in any way, since the pointers are copied, not -// the values themselves. -func NewContext(opts *ContextOpts) (*Context, error) { - // Validate the version requirement if it is given - if opts.Module != nil { - if err := CheckRequiredVersion(opts.Module); err != nil { - return nil, err - } +// Once a Context is created, the caller must not access or mutate any of +// the objects referenced (directly or indirectly) by the ContextOpts fields. +// +// If the returned diagnostics contains errors then the resulting context is +// invalid and must not be used. +func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { + log.Printf("[TRACE] terraform.NewContext: starting") + diags := CheckCoreVersionRequirements(opts.Config) + // If version constraints are not met then we'll bail early since otherwise + // we're likely to just see a bunch of other errors related to + // incompatibilities, which could be overwhelming for the user. + if diags.HasErrors() { + return nil, diags } // Copy all the hooks and add our stop hook. We don't append directly @@ -139,21 +143,9 @@ func NewContext(opts *ContextOpts) (*Context, error) { state := opts.State if state == nil { - state = new(State) - state.init() - } - - // If our state is from the future, then error. Callers can avoid - // this error by explicitly setting `StateFutureAllowed`. - if err := CheckStateVersion(state); err != nil && !opts.StateFutureAllowed { - return nil, err + state = states.NewState() } - // Explicitly reset our state version to our current version so that - // any operations we do will write out that our latest version - // has run. - state.TFVersion = version.Version - // Determine parallelism, default to 10. We do this both to limit // CPU pressure but also to have an extra guard against rate throttling // from providers. @@ -168,60 +160,84 @@ func NewContext(opts *ContextOpts) (*Context, error) { // 2 - Take values specified in -var flags, overriding values // set by environment variables if necessary. This includes // values taken from -var-file in addition. - variables := make(map[string]interface{}) - if opts.Module != nil { - var err error - variables, err = Variables(opts.Module, opts.Variables) - if err != nil { - return nil, err - } + var variables InputValues + if opts.Config != nil { + // Default variables from the configuration seed our map. 
+ variables = DefaultVariableValues(opts.Config.Module.Variables) } + // Variables provided by the caller (from CLI, environment, etc) can + // override the defaults. + variables = variables.Override(opts.Variables) // Bind available provider plugins to the constraints in config - var providers map[string]ResourceProviderFactory + var providerFactories map[string]providers.Factory if opts.ProviderResolver != nil { - var err error - deps := ModuleTreeDependencies(opts.Module, state) + deps := ConfigTreeDependencies(opts.Config, state) reqd := deps.AllPluginRequirements() if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { reqd.LockExecutables(opts.ProviderSHA256s) } - providers, err = resourceProviderFactories(opts.ProviderResolver, reqd) - if err != nil { - return nil, err + log.Printf("[TRACE] terraform.NewContext: resolving provider version selections") + + var providerDiags tfdiags.Diagnostics + providerFactories, providerDiags = resourceProviderFactories(opts.ProviderResolver, reqd) + diags = diags.Append(providerDiags) + + if diags.HasErrors() { + return nil, diags } } else { - providers = make(map[string]ResourceProviderFactory) + providerFactories = make(map[string]providers.Factory) } - diff := opts.Diff - if diff == nil { - diff = &Diff{} + components := &basicComponentFactory{ + providers: providerFactories, + provisioners: opts.Provisioners, } + log.Printf("[TRACE] terraform.NewContext: loading provider schemas") + schemas, err := LoadSchemas(opts.Config, opts.State, components) + if err != nil { + diags = diags.Append(err) + return nil, diags + } + + changes := opts.Changes + if changes == nil { + changes = plans.NewChanges() + } + + config := opts.Config + if config == nil { + config = configs.NewEmptyConfig() + } + + log.Printf("[TRACE] terraform.NewContext: complete") + return &Context{ - components: &basicComponentFactory{ - providers: providers, - provisioners: opts.Provisioners, - }, - destroy: opts.Destroy, - diff: diff, - hooks: hooks, - meta: opts.Meta, - module: opts.Module, - shadow: opts.Shadow, - state: state, - targets: opts.Targets, - uiInput: opts.UIInput, - variables: variables, + components: components, + schemas: schemas, + destroy: opts.Destroy, + changes: changes, + hooks: hooks, + meta: opts.Meta, + config: config, + state: state, + targets: opts.Targets, + uiInput: opts.UIInput, + variables: variables, parallelSem: NewSemaphore(par), - providerInputConfig: make(map[string]map[string]interface{}), + providerInputConfig: make(map[string]map[string]cty.Value), providerSHA256s: opts.ProviderSHA256s, sh: sh, }, nil } +func (c *Context) Schemas() *Schemas { + return c.schemas +} + type ContextGraphOpts struct { // If true, validates the graph structure (checks for cycles). Validate bool @@ -233,7 +249,7 @@ type ContextGraphOpts struct { // Graph returns the graph used for the given operation type. // // The most extensive or complex graph type is GraphTypePlan. 
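+// For example (hypothetical):
+//
+//     g, diags := c.Graph(GraphTypePlan, &ContextGraphOpts{Validate: true})
+//     if diags.HasErrors() {
+//         return diags.Err()
+//     }
+//     _ = g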
-func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) { +func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) { if opts == nil { opts = &ContextGraphOpts{Validate: true} } @@ -242,65 +258,71 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) { switch typ { case GraphTypeApply: return (&ApplyGraphBuilder{ - Module: c.module, - Diff: c.diff, - State: c.state, - Providers: c.components.ResourceProviders(), - Provisioners: c.components.ResourceProvisioners(), - Targets: c.targets, - Destroy: c.destroy, - Validate: opts.Validate, - }).Build(RootModulePath) - - case GraphTypeInput: - // The input graph is just a slightly modified plan graph - fallthrough + Config: c.config, + Changes: c.changes, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Destroy: c.destroy, + Validate: opts.Validate, + }).Build(addrs.RootModuleInstance) + case GraphTypeValidate: // The validate graph is just a slightly modified plan graph fallthrough case GraphTypePlan: // Create the plan graph builder p := &PlanGraphBuilder{ - Module: c.module, - State: c.state, - Providers: c.components.ResourceProviders(), - Targets: c.targets, - Validate: opts.Validate, + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Validate: opts.Validate, } // Some special cases for other graph types shared with plan currently var b GraphBuilder = p switch typ { - case GraphTypeInput: - b = InputGraphBuilder(p) case GraphTypeValidate: - // We need to set the provisioners so those can be validated - p.Provisioners = c.components.ResourceProvisioners() - b = ValidateGraphBuilder(p) } - return b.Build(RootModulePath) + return b.Build(addrs.RootModuleInstance) case GraphTypePlanDestroy: return (&DestroyPlanGraphBuilder{ - Module: c.module, - State: c.state, - Targets: c.targets, - Validate: opts.Validate, - }).Build(RootModulePath) + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Validate: opts.Validate, + }).Build(addrs.RootModuleInstance) case GraphTypeRefresh: return (&RefreshGraphBuilder{ - Module: c.module, - State: c.state, - Providers: c.components.ResourceProviders(), - Targets: c.targets, - Validate: opts.Validate, - }).Build(RootModulePath) - } + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Validate: opts.Validate, + }).Build(addrs.RootModuleInstance) + + case GraphTypeEval: + return (&EvalGraphBuilder{ + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + }).Build(addrs.RootModuleInstance) - return nil, fmt.Errorf("unknown graph type: %s", typ) + default: + // Should never happen, because the above is exhaustive for all graph types. + panic(fmt.Errorf("unsupported graph type %s", typ)) + } } // ShadowError returns any errors caught during a shadow operation. @@ -333,141 +355,72 @@ func (c *Context) ShadowError() error { // State returns a copy of the current state associated with this context. // // This cannot safely be called in parallel with any other Context function. -func (c *Context) State() *State { +func (c *Context) State() *states.State { return c.state.DeepCopy() } -// Interpolater returns an Interpolater built on a copy of the state -// that can be used to test interpolation values. 
-func (c *Context) Interpolater() *Interpolater { - var varLock sync.Mutex - var stateLock sync.RWMutex - return &Interpolater{ - Operation: walkApply, - Meta: c.meta, - Module: c.module, - State: c.state.DeepCopy(), - StateLock: &stateLock, - VariableValues: c.variables, - VariableValuesLock: &varLock, - } -} - -// Input asks for input to fill variables and provider configurations. -// This modifies the configuration in-place, so asking for Input twice -// may result in different UI output showing different current values. -func (c *Context) Input(mode InputMode) error { - defer c.acquireRun("input")() - - if mode&InputModeVar != 0 { - // Walk the variables first for the root module. We walk them in - // alphabetical order for UX reasons. - rootConf := c.module.Config() - names := make([]string, len(rootConf.Variables)) - m := make(map[string]*config.Variable) - for i, v := range rootConf.Variables { - names[i] = v.Name - m[v.Name] = v - } - sort.Strings(names) - for _, n := range names { - // If we only care about unset variables, then if the variable - // is set, continue on. - if mode&InputModeVarUnset != 0 { - if _, ok := c.variables[n]; ok { - continue - } - } - - var valueType config.VariableType - - v := m[n] - switch valueType = v.Type(); valueType { - case config.VariableTypeUnknown: - continue - case config.VariableTypeMap: - // OK - case config.VariableTypeList: - // OK - case config.VariableTypeString: - // OK - default: - panic(fmt.Sprintf("Unknown variable type: %#v", v.Type())) - } - - // If the variable is not already set, and the variable defines a - // default, use that for the value. - if _, ok := c.variables[n]; !ok { - if v.Default != nil { - c.variables[n] = v.Default.(string) - continue - } - } - - // this should only happen during tests - if c.uiInput == nil { - log.Println("[WARN] Content.uiInput is nil") - continue - } - - // Ask the user for a value for this variable - var value string - retry := 0 - for { - var err error - value, err = c.uiInput.Input(&InputOpts{ - Id: fmt.Sprintf("var.%s", n), - Query: fmt.Sprintf("var.%s", n), - Description: v.Description, - }) - if err != nil { - return fmt.Errorf( - "Error asking for %s: %s", n, err) - } - - if value == "" && v.Required() { - // Redo if it is required, but abort if we keep getting - // blank entries - if retry > 2 { - return fmt.Errorf("missing required value for %q", n) - } - retry++ - continue - } - - break - } - - // no value provided, so don't set the variable at all - if value == "" { - continue - } - - decoded, err := parseVariableAsHCL(n, value, valueType) - if err != nil { - return err - } - - if decoded != nil { - c.variables[n] = decoded - } - } - } +// Eval produces a scope in which expressions can be evaluated for +// the given module path. +// +// This method must first evaluate any ephemeral values (input variables, local +// values, and output values) in the configuration. These ephemeral values are +// not included in the persisted state, so they must be re-computed using other +// values in the state before they can be properly evaluated. The updated +// values are retained in the main state associated with the receiving context. +// +// This function takes no action against remote APIs but it does need access +// to all provider and provisioner instances in order to obtain their schemas +// for type checking. +// +// The result is an evaluation scope that can be used to resolve references +// against the root module. 
If the returned diagnostics contains errors then +// the returned scope may be nil. If it is not nil then it may still be used +// to attempt expression evaluation or other analysis, but some expressions +// may not behave as expected. +func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) { + // This is intended for external callers such as the "terraform console" + // command. Internally, we create an evaluator in c.walk before walking + // the graph, and create scopes in ContextGraphWalker. - if mode&InputModeProvider != 0 { - // Build the graph - graph, err := c.Graph(GraphTypeInput, nil) - if err != nil { - return err - } + var diags tfdiags.Diagnostics + defer c.acquireRun("eval")() - // Do the walk - if _, err := c.walk(graph, walkInput); err != nil { - return err - } - } + // Start with a copy of state so that we don't affect any instances + // that other methods may have already returned. + c.state = c.state.DeepCopy() + var walker *ContextGraphWalker + + graph, graphDiags := c.Graph(GraphTypeEval, nil) + diags = diags.Append(graphDiags) + if !diags.HasErrors() { + var walkDiags tfdiags.Diagnostics + walker, walkDiags = c.walk(graph, walkEval) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + } + + if walker == nil { + // If we skipped walking the graph (due to errors) then we'll just + // use a placeholder graph walker here, which'll refer to the + // unmodified state. + walker = c.graphWalker(walkEval) + } + + // This is a bit weird since we don't normally evaluate outside of + // the context of a walk, but we'll "re-enter" our desired path here + // just to get hold of an EvalContext for it. GraphContextBuiltin + // caches its contexts, so we should get hold of the context that was + // previously used for evaluation here, unless we skipped walking. + evalCtx := walker.EnterPath(path) + return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags +} - return nil +// Interpolater is no longer used. Use Evaluator instead. +// +// The interpolator returned from this function will return an error on any use. +func (c *Context) Interpolater() *Interpolater { + // FIXME: Remove this once all callers are updated to no longer use it. + return &Interpolater{} } // Apply applies the changes represented by this context and returns @@ -484,23 +437,16 @@ func (c *Context) Input(mode InputMode) error { // State() method. Currently the helper/resource testing framework relies // on the absence of a returned state to determine if Destroy can be // called, so that will need to be refactored before this can be changed. -func (c *Context) Apply() (*State, error) { +func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) { defer c.acquireRun("apply")() - // Check there are no empty target parameter values - for _, target := range c.targets { - if target == "" { - return nil, fmt.Errorf("Target parameter must not have empty value") - } - } - // Copy our own state c.state = c.state.DeepCopy() // Build the graph. - graph, err := c.Graph(GraphTypeApply, nil) - if err != nil { - return nil, err + graph, diags := c.Graph(GraphTypeApply, nil) + if diags.HasErrors() { + return nil, diags } // Determine the operation @@ -510,15 +456,30 @@ func (c *Context) Apply() (*State, error) { } // Walk the graph - walker, err := c.walk(graph, operation) - if len(walker.ValidationErrors) > 0 { - err = multierror.Append(err, walker.ValidationErrors...) 
- } - - // Clean out any unused things - c.state.prune() - - return c.state, err + walker, walkDiags := c.walk(graph, operation) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + + if c.destroy && !diags.HasErrors() { + // If we know we were trying to destroy objects anyway, and we + // completed without any errors, then we'll also prune out any + // leftover empty resource husks (left after all of the instances + // of a resource with "count" or "for_each" are destroyed) to + // help ensure we end up with an _actually_ empty state, assuming + // we weren't destroying with -target here. + // + // (This doesn't actually take into account -target, but that should + // be okay because it doesn't throw away anything we can't recompute + // on a subsequent "terraform plan" run, if the resources are still + // present in the configuration. However, this _will_ cause "count = 0" + // resources to read as unknown during the next refresh walk, which + // may cause some additional churn if used in a data resource or + // provider block, until we remove refreshing as a separate walk and + // just do it as part of the plan walk.) + c.state.PruneResourceHusks() + } + + return c.state, diags } // Plan generates an execution plan for the given context. @@ -528,38 +489,45 @@ func (c *Context) Apply() (*State, error) { // // Plan also updates the diff of this context to be the diff generated // by the plan, so Apply can be called after. -func (c *Context) Plan() (*Plan, error) { +func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) { defer c.acquireRun("plan")() + c.changes = plans.NewChanges() - // Check there are no empty target parameter values - for _, target := range c.targets { - if target == "" { - return nil, fmt.Errorf("Target parameter must not have empty value") + var diags tfdiags.Diagnostics + + varVals := make(map[string]plans.DynamicValue, len(c.variables)) + for k, iv := range c.variables { + // We use cty.DynamicPseudoType here so that we'll save both the + // value _and_ its dynamic type in the plan, so we can recover + // exactly the same value later. + dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to prepare variable value for plan", + fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err), + )) + continue } + varVals[k] = dv } - p := &Plan{ - Module: c.module, - Vars: c.variables, - State: c.state, - Targets: c.targets, - - TerraformVersion: version.String(), - ProviderSHA256s: c.providerSHA256s, + p := &plans.Plan{ + VariableValues: varVals, + TargetAddrs: c.targets, + ProviderSHA256s: c.providerSHA256s, } var operation walkOperation if c.destroy { operation = walkPlanDestroy - p.Destroy = true } else { // Set our state to be something temporary. We do this so that // the plan can update a fake state so that variables work, then // we replace it back with our old state. old := c.state if old == nil { - c.state = &State{} - c.state.init() + c.state = states.NewState() } else { c.state = old.DeepCopy() } @@ -570,57 +538,27 @@ func (c *Context) Plan() (*Plan, error) { operation = walkPlan } - // Setup our diff - c.diffLock.Lock() - c.diff = new(Diff) - c.diff.init() - c.diffLock.Unlock() - // Build the graph. 
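The cty.DynamicPseudoType encoding used for VariableValues above deserves a tiny standalone sketch: storing the value together with its own dynamic type is what lets the plan recover exactly the same value later.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"

	"github.com/hashicorp/terraform/plans"
)

func main() {
	in := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("example"),
		"port": cty.NumberIntVal(8080),
	})
	// Encoding with cty.DynamicPseudoType serializes the value's concrete
	// type alongside the value itself.
	dv, err := plans.NewDynamicValue(in, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	out, err := dv.Decode(cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.RawEquals(in)) // true: an exact round-trip
}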
graphType := GraphTypePlan if c.destroy { graphType = GraphTypePlanDestroy } - graph, err := c.Graph(graphType, nil) - if err != nil { - return nil, err + graph, graphDiags := c.Graph(graphType, nil) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return nil, diags } // Do the walk - walker, err := c.walk(graph, operation) - if err != nil { - return nil, err + walker, walkDiags := c.walk(graph, operation) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return nil, diags } - p.Diff = c.diff - - // If this is true, it means we're running unit tests. In this case, - // we perform a deep copy just to ensure that all context tests also - // test that a diff is copy-able. This will panic if it fails. This - // is enabled during unit tests. - // - // This should never be true during production usage, but even if it is, - // it can't do any real harm. - if contextTestDeepCopyOnPlan { - p.Diff.DeepCopy() - } - - /* - // We don't do the reverification during the new destroy plan because - // it will use a different apply process. - if X_legacyGraph { - // Now that we have a diff, we can build the exact graph that Apply will use - // and catch any possible cycles during the Plan phase. - if _, err := c.Graph(GraphTypeLegacy, nil); err != nil { - return nil, err - } - } - */ + p.Changes = c.changes - var errs error - if len(walker.ValidationErrors) > 0 { - errs = multierror.Append(errs, walker.ValidationErrors...) - } - return p, errs + return p, diags } // Refresh goes through all the resources in the state and refreshes them @@ -629,27 +567,46 @@ func (c *Context) Plan() (*Plan, error) { // // Even in the case an error is returned, the state may be returned and // will potentially be partially updated. -func (c *Context) Refresh() (*State, error) { +func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) { defer c.acquireRun("refresh")() // Copy our own state c.state = c.state.DeepCopy() + // Refresh builds a partial changeset as part of its work because it must + // create placeholder stubs for any resource instances that'll be created + // in a subsequent plan so that provider configurations and data resources + // can interpolate from them. This plan is always thrown away after + // the operation completes, restoring any existing changeset. + oldChanges := c.changes + defer func() { c.changes = oldChanges }() + c.changes = plans.NewChanges() + // Build the graph. - graph, err := c.Graph(GraphTypeRefresh, nil) - if err != nil { - return nil, err + graph, diags := c.Graph(GraphTypeRefresh, nil) + if diags.HasErrors() { + return nil, diags } // Do the walk - if _, err := c.walk(graph, walkRefresh); err != nil { - return nil, err - } - - // Clean out any unused things - c.state.prune() - - return c.state, nil + _, walkDiags := c.walk(graph, walkRefresh) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return nil, diags + } + + // During our walk we will have created planned object placeholders in + // state for resource instances that are in configuration but not yet + // created. These were created only to allow expression evaluation to + // work properly in provider and data blocks during the walk and must + // now be discarded, since a subsequent plan walk is responsible for + // creating these "for real".
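Plan, Refresh, and the other phases now thread tfdiags.Diagnostics instead of plain errors. A minimal sketch of the accumulation idiom (runPhases is illustrative, not from this patch):

package main

import "github.com/hashicorp/terraform/tfdiags"

// runPhases mirrors the pattern above: accumulate with Append, gate on
// HasErrors, and flatten to a plain error only at the API boundary.
func runPhases(steps ...func() error) error {
	var diags tfdiags.Diagnostics
	for _, step := range steps {
		diags = diags.Append(step()) // Append accepts plain errors, among other types
		if diags.HasErrors() {
			break // later steps would mostly produce redundant errors
		}
	}
	return diags.Err() // nil when no error-severity diagnostics were recorded
}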
+ // TODO: Consolidate refresh and plan into a single walk, so that the + // refresh walk doesn't need to emulate various aspects of the plan + // walk in order to properly evaluate provider and data blocks. + c.state.SyncWrapper().RemovePlannedResourceInstanceObjects() + + return c.state, diags } // Stop stops the running task. @@ -675,32 +632,33 @@ func (c *Context) Stop() { // Grab the condition var before we exit if cond := c.runCond; cond != nil { + log.Printf("[INFO] terraform: waiting for graceful stop to complete") cond.Wait() } log.Printf("[WARN] terraform: stop complete") } -// Validate validates the configuration and returns any warnings or errors. +// Validate performs semantic validation of the configuration, and returns +// any warnings or errors. +// +// Syntax and structural checks are performed by the configuration loader, +// and so are not repeated here. func (c *Context) Validate() tfdiags.Diagnostics { defer c.acquireRun("validate")() var diags tfdiags.Diagnostics - // Validate the configuration itself - diags = diags.Append(c.module.Validate()) - - // This only needs to be done for the root module, since inter-module - // variables are validated in the module tree. - if config := c.module.Config(); config != nil { - // Validate the user variables - for _, err := range smcUserVariables(config, c.variables) { - diags = diags.Append(err) - } + // Validate input variables. We do this only for the values supplied + // by the root module, since child module calls are validated when we + // visit their graph nodes. + if c.config != nil { + varDiags := checkInputVariables(c.config.Module.Variables, c.variables) + diags = diags.Append(varDiags) } - // If we have errors at this point, the graphing has no chance, - // so just bail early. + // If we have errors at this point then we probably won't be able to + // construct a graph without producing redundant errors, so we'll halt early. if diags.HasErrors() { return diags } @@ -709,48 +667,41 @@ func (c *Context) Validate() tfdiags.Diagnostics { // We also validate the graph generated here, but this graph doesn't // necessarily match the graph that Plan will generate, so we'll validate the // graph again later after Planning. - graph, err := c.Graph(GraphTypeValidate, nil) - if err != nil { - diags = diags.Append(err) + graph, graphDiags := c.Graph(GraphTypeValidate, nil) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { return diags } // Walk - walker, err := c.walk(graph, walkValidate) - if err != nil { - diags = diags.Append(err) - } - - sort.Strings(walker.ValidationWarnings) - sort.Slice(walker.ValidationErrors, func(i, j int) bool { - return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error() - }) - - for _, warn := range walker.ValidationWarnings { - diags = diags.Append(tfdiags.SimpleWarning(warn)) - } - for _, err := range walker.ValidationErrors { - diags = diags.Append(err) + walker, walkDiags := c.walk(graph, walkValidate) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return diags } return diags } -// Module returns the module tree associated with this context. -func (c *Context) Module() *module.Tree { - return c.module +// Config returns the configuration tree associated with this context. +func (c *Context) Config() *configs.Config { + return c.config } // Variables will return the mapping of variables that were defined // for this Context.
If Input was called, this mapping may be different // than what was given. -func (c *Context) Variables() map[string]interface{} { +func (c *Context) Variables() InputValues { return c.variables } // SetVariable sets a variable after a context has already been built. -func (c *Context) SetVariable(k string, v interface{}) { - c.variables[k] = v +func (c *Context) SetVariable(k string, v cty.Value) { + c.variables[k] = &InputValue{ + Value: v, + SourceType: ValueFromCaller, + } } func (c *Context) acquireRun(phase string) func() { @@ -767,9 +718,6 @@ func (c *Context) acquireRun(phase string) func() { // Build our lock c.runCond = sync.NewCond(&c.l) - // Setup debugging - dbug.SetPhase(phase) - // Create a new run context c.runContext, c.runContextCancel = context.WithCancel(context.Background()) @@ -787,11 +735,6 @@ func (c *Context) releaseRun() { c.l.Lock() defer c.l.Unlock() - // setting the phase to "INVALID" lets us easily detect if we have - // operations happening outside of a run, or we missed setting the proper - // phase - dbug.SetPhase("INVALID") - // End our run. We check if runContext is non-nil because it can be // set to nil if it was cancelled via Stop() if c.runContextCancel != nil { @@ -807,30 +750,33 @@ func (c *Context) releaseRun() { c.runContext = nil } -func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) { - // Keep track of the "real" context which is the context that does - // the real work: talking to real providers, modifying real state, etc. - realCtx := c - +func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) { log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) - walker := &ContextGraphWalker{ - Context: realCtx, - Operation: operation, - StopContext: c.runContext, - } + walker := c.graphWalker(operation) // Watch for a stop so we can call the provider Stop() API. watchStop, watchWait := c.watchStop(walker) // Walk the real graph, this will block until it completes - realErr := graph.Walk(walker) + diags := graph.Walk(walker) // Close the channel so the watcher stops, and wait for it to return. close(watchStop) <-watchWait - return walker, realErr + return walker, diags +} + +func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker { + return &ContextGraphWalker{ + Context: c, + State: c.state.SyncWrapper(), + Changes: c.changes.SyncWrapper(), + Operation: operation, + StopContext: c.runContext, + RootVariableValues: c.variables, + } } // watchStop immediately returns a `stop` and a `wait` chan after dispatching @@ -863,12 +809,13 @@ func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan s } // If we're here, we're stopped, trigger the call. + log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop") { // Copy the providers so that a misbehaved blocking Stop doesn't // completely hang Terraform. 
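A caller-side sketch of the reworked variable setter above: values are cty.Value rather than interface{}, and each is recorded with its source:

package main

import (
	"github.com/zclconf/go-cty/cty"

	"github.com/hashicorp/terraform/terraform"
)

func configureRegion(c *terraform.Context) {
	// Per SetVariable above, this stores an &InputValue{Value: ...,
	// SourceType: ValueFromCaller} in the context's variable map.
	c.SetVariable("region", cty.StringVal("us-west-2"))
}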
walker.providerLock.Lock() - ps := make([]ResourceProvider, 0, len(walker.providerCache)) + ps := make([]providers.Interface, 0, len(walker.providerCache)) for _, p := range walker.providerCache { ps = append(ps, p) } @@ -885,7 +832,7 @@ func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan s { // Call stop on all the provisioners walker.provisionerLock.Lock() - ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache)) + ps := make([]provisioners.Interface, 0, len(walker.provisionerCache)) for _, p := range walker.provisionerCache { ps = append(ps, p) } @@ -955,3 +902,37 @@ func parseVariableAsHCL(name string, input string, targetType config.VariableTyp panic(fmt.Errorf("unknown type %s", targetType.Printable())) } } + +// ShimLegacyState is a helper that takes the legacy state type and +// converts it to the new state type. +// +// This is implemented as a state file upgrade, so it will not preserve +// parts of the state structure that are not included in a serialized state, +// such as the resolved results of any local values, outputs in non-root +// modules, etc. +func ShimLegacyState(legacy *State) (*states.State, error) { + if legacy == nil { + return nil, nil + } + var buf bytes.Buffer + err := WriteState(legacy, &buf) + if err != nil { + return nil, err + } + f, err := statefile.Read(&buf) + if err != nil { + return nil, err + } + return f.State, err +} + +// MustShimLegacyState is a wrapper around ShimLegacyState that panics if +// the conversion does not succeed. This is primarily intended for tests where +// the given legacy state is an object constructed within the test. +func MustShimLegacyState(legacy *State) *states.State { + ret, err := ShimLegacyState(legacy) + if err != nil { + panic(err) + } + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go index 6f507445c3..26ec99595b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_components.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go @@ -2,6 +2,9 @@ package terraform import ( "fmt" + + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" ) // contextComponentFactory is the interface that Context uses @@ -12,25 +15,25 @@ type contextComponentFactory interface { // ResourceProvider creates a new ResourceProvider with the given // type. The "uid" is a unique identifier for this provider being // initialized that can be used for internal tracking. - ResourceProvider(typ, uid string) (ResourceProvider, error) + ResourceProvider(typ, uid string) (providers.Interface, error) ResourceProviders() []string // ResourceProvisioner creates a new ResourceProvisioner with the // given type. The "uid" is a unique identifier for this provisioner // being initialized that can be used for internal tracking. - ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) + ResourceProvisioner(typ, uid string) (provisioners.Interface, error) ResourceProvisioners() []string } // basicComponentFactory just calls a factory from a map directly. 
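Concretely, the factory map such a component factory consumes might look like this sketch (mock stands in for any providers.Interface implementation; the helper name is illustrative):

package main

import "github.com/hashicorp/terraform/providers"

// exampleFactories builds a provider factory map; providers.Factory is just
// func() (providers.Interface, error), so a closure over an existing
// instance is enough for tests.
func exampleFactories(mock providers.Interface) map[string]providers.Factory {
	return map[string]providers.Factory{
		"aws": func() (providers.Interface, error) {
			return mock, nil // a real factory would launch a provider plugin
		},
	}
}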
type basicComponentFactory struct { - providers map[string]ResourceProviderFactory - provisioners map[string]ResourceProvisionerFactory + providers map[string]providers.Factory + provisioners map[string]ProvisionerFactory } func (c *basicComponentFactory) ResourceProviders() []string { result := make([]string, len(c.providers)) - for k, _ := range c.providers { + for k := range c.providers { result = append(result, k) } @@ -39,14 +42,14 @@ func (c *basicComponentFactory) ResourceProviders() []string { func (c *basicComponentFactory) ResourceProvisioners() []string { result := make([]string, len(c.provisioners)) - for k, _ := range c.provisioners { + for k := range c.provisioners { result = append(result, k) } return result } -func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) { +func (c *basicComponentFactory) ResourceProvider(typ, uid string) (providers.Interface, error) { f, ok := c.providers[typ] if !ok { return nil, fmt.Errorf("unknown provider %q", typ) @@ -55,7 +58,7 @@ func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvi return f() } -func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) { +func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (provisioners.Interface, error) { f, ok := c.provisioners[typ] if !ok { return nil, fmt.Errorf("unknown provisioner %q", typ) diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go index 084f0105dd..0a424a01d1 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go @@ -14,8 +14,8 @@ const ( GraphTypePlan GraphTypePlanDestroy GraphTypeApply - GraphTypeInput GraphTypeValidate + GraphTypeEval // only visits in-memory elements such as variables, locals, and outputs. ) // GraphTypeMap is a mapping of human-readable string to GraphType. This @@ -23,10 +23,10 @@ const ( // graph types. var GraphTypeMap = map[string]GraphType{ "apply": GraphTypeApply, - "input": GraphTypeInput, "plan": GraphTypePlan, "plan-destroy": GraphTypePlanDestroy, "refresh": GraphTypeRefresh, "legacy": GraphTypeLegacy, "validate": GraphTypeValidate, + "eval": GraphTypeEval, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go index e940143197..313e9094f9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go @@ -1,7 +1,10 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // ImportOpts are used as the configuration for Import. @@ -9,23 +12,23 @@ type ImportOpts struct { // Targets are the targets to import Targets []*ImportTarget - // Module is optional, and specifies a config module that is loaded - // into the graph and evaluated. The use case for this is to provide - // provider configuration. - Module *module.Tree + // Config is optional, and specifies a config tree that will be loaded + // into the graph and evaluated. This is the source for provider + // configurations. + Config *configs.Config } // ImportTarget is a single resource to import. 
type ImportTarget struct { - // Addr is the full resource address of the resource to import. - // Example: "module.foo.aws_instance.bar" - Addr string + // Addr is the address for the resource instance that the new object should + // be imported into. + Addr addrs.AbsResourceInstance // ID is the ID of the resource to import. This is resource-specific. ID string - // Provider string - Provider string + // ProviderAddr is the address of the provider that should handle the import. + ProviderAddr addrs.AbsProviderConfig } // Import takes already-created external resources and brings them @@ -38,7 +41,9 @@ type ImportTarget struct { // Further, this operation also gracefully handles partial state. If during // an import there is a failure, all previously imported resources remain // imported. -func (c *Context) Import(opts *ImportOpts) (*State, error) { +func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + // Hold a lock since we can modify our own state here defer c.acquireRun("import")() @@ -47,31 +52,32 @@ func (c *Context) Import(opts *ImportOpts) (*State, error) { // If no module is given, default to the module configured with // the Context. - module := opts.Module - if module == nil { - module = c.module + config := opts.Config + if config == nil { + config = c.config } // Initialize our graph builder builder := &ImportGraphBuilder{ ImportTargets: opts.Targets, - Module: module, - Providers: c.components.ResourceProviders(), + Config: config, + Components: c.components, + Schemas: c.schemas, } // Build the graph! - graph, err := builder.Build(RootModulePath) - if err != nil { - return c.state, err + graph, graphDiags := builder.Build(addrs.RootModuleInstance) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return c.state, diags } // Walk it - if _, err := c.walk(graph, walkImport); err != nil { - return c.state, err + _, walkDiags := c.walk(graph, walkImport) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return c.state, diags } - // Clean the state - c.state.prune() - - return c.state, nil + return c.state, diags } diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_input.go b/vendor/github.com/hashicorp/terraform/terraform/context_input.go new file mode 100644 index 0000000000..6c7be8817c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/context_input.go @@ -0,0 +1,251 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "sort" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/tfdiags" +) + +// Input asks for input to fill variables and provider configurations. +// This modifies the configuration in-place, so asking for Input twice +// may result in different UI output showing different current values. +func (c *Context) Input(mode InputMode) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + defer c.acquireRun("input")() + + if c.uiInput == nil { + log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping") + return diags + } + + ctx := context.Background() + + if mode&InputModeVar != 0 { + log.Printf("[TRACE] Context.Input: Prompting for variables") + + // Walk the variables first for the root module. We walk them in + // alphabetical order for UX reasons. 
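Returning to the import API above: a hedged sketch of building the new-style options, assuming ParseAbsResourceInstanceStr as the addrs helper for parsing an absolute resource instance address from a string:

package main

import (
	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/terraform"
)

func importInstance(id string) (*terraform.ImportOpts, error) {
	// The target address is now a structured addrs.AbsResourceInstance
	// rather than a raw string like "module.foo.aws_instance.bar".
	addr, diags := addrs.ParseAbsResourceInstanceStr("module.foo.aws_instance.bar")
	if diags.HasErrors() {
		return nil, diags.Err()
	}
	return &terraform.ImportOpts{
		Targets: []*terraform.ImportTarget{
			{Addr: addr, ID: id},
		},
	}, nil
}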
+ configs := c.config.Module.Variables + names := make([]string, 0, len(configs)) + for name := range configs { + names = append(names, name) + } + sort.Strings(names) + Variables: + for _, n := range names { + v := configs[n] + + // If we only care about unset variables, then we should skip any + // variable that is already set. + if mode&InputModeVarUnset != 0 { + if _, isSet := c.variables[n]; isSet { + continue + } + } + + // this should only happen during tests + if c.uiInput == nil { + log.Println("[WARN] Context.uiInput is nil during input walk") + continue + } + + // Ask the user for a value for this variable + var rawValue string + retry := 0 + for { + var err error + rawValue, err = c.uiInput.Input(ctx, &InputOpts{ + Id: fmt.Sprintf("var.%s", n), + Query: fmt.Sprintf("var.%s", n), + Description: v.Description, + }) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to request interactive input", + fmt.Sprintf("Terraform attempted to request a value for var.%s interactively, but encountered an error: %s.", n, err), + )) + return diags + } + + if rawValue == "" && v.Default == cty.NilVal { + // Redo if it is required, but abort if we keep getting + // blank entries + if retry > 2 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Required variable not assigned", + fmt.Sprintf("The variable %q is required, so Terraform cannot proceed without a defined value for it.", n), + )) + continue Variables + } + retry++ + continue + } + + break + } + + val, valDiags := v.ParsingMode.Parse(n, rawValue) + diags = diags.Append(valDiags) + if diags.HasErrors() { + continue + } + + c.variables[n] = &InputValue{ + Value: val, + SourceType: ValueFromInput, + } + } + } + + if mode&InputModeProvider != 0 { + log.Printf("[TRACE] Context.Input: Prompting for provider arguments") + + // We prompt for input only for provider configurations defined in + // the root module. At the time of writing that is an arbitrary + // restriction, but we have future plans to support "count" and + // "for_each" on modules that will then prevent us from supporting + // input for child module configurations anyway (since we'd need to + // dynamic-expand first), and provider configurations in child modules + // are not recommended since v0.11 anyway, so this restriction allows + // us to keep this relatively simple without significant hardship. + + pcs := make(map[string]*configs.Provider) + pas := make(map[string]addrs.ProviderConfig) + for _, pc := range c.config.Module.ProviderConfigs { + addr := pc.Addr() + pcs[addr.String()] = pc + pas[addr.String()] = addr + log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange) + } + // We also need to detect _implied_ provider configs from resources. + // These won't have *configs.Provider objects, but they will still + // exist in the map and we'll just treat them as empty below.
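The ParsingMode.Parse call above is what turns a raw prompt string into a cty.Value; a sketch (the precise diagnostics type Parse returns is glossed over here, so the error is rewrapped):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"

	"github.com/hashicorp/terraform/configs"
)

func parseRaw(v *configs.Variable, name, raw string) (cty.Value, error) {
	// A variable declared as list(string) parses in HCL mode, so input
	// like ["a", "b"] becomes a real list value rather than a string.
	val, valDiags := v.ParsingMode.Parse(name, raw)
	if valDiags.HasErrors() {
		return cty.NilVal, fmt.Errorf("invalid value for var.%s", name)
	}
	return val, nil
}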
+ for _, rc := range c.config.Module.ManagedResources { + pa := rc.ProviderConfigAddr() + if pa.Alias != "" { + continue // alias configurations cannot be implied + } + if _, exists := pcs[pa.String()]; !exists { + pcs[pa.String()] = nil + pas[pa.String()] = pa + log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange) + } + } + for _, rc := range c.config.Module.DataResources { + pa := rc.ProviderConfigAddr() + if pa.Alias != "" { + continue // alias configurations cannot be implied + } + if _, exists := pcs[pa.String()]; !exists { + pcs[pa.String()] = nil + pas[pa.String()] = pa + log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange) + } + } + + for pk, pa := range pas { + pc := pcs[pk] // will be nil if this is an implied config + + // Wrap the input into a namespace + input := &PrefixUIInput{ + IdPrefix: pk, + QueryPrefix: pk + ".", + UIInput: c.uiInput, + } + + schema := c.schemas.ProviderConfig(pa.Type) + if schema == nil { + // Could either be an incorrect config or just an incomplete + // mock in tests. We'll let a later pass decide, and just + // ignore this for the purposes of gathering input. + log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.Type) + continue + } + + // For our purposes here we just want to detect if attributes are + // set in config at all, so rather than doing a full decode + // (which would require us to prepare an evalcontext, etc) we'll + // use the low-level HCL API to process only the top-level + // structure. + var attrExprs hcl.Attributes // nil if there is no config + if pc != nil && pc.Config != nil { + lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec())) + content, _, diags := pc.Config.PartialContent(lowLevelSchema) + if diags.HasErrors() { + log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error()) + continue + } + attrExprs = content.Attributes + } + + keys := make([]string, 0, len(schema.Attributes)) + for key := range schema.Attributes { + keys = append(keys, key) + } + sort.Strings(keys) + + vals := map[string]cty.Value{} + for _, key := range keys { + attrS := schema.Attributes[key] + if attrS.Optional { + continue + } + if attrExprs != nil { + if _, exists := attrExprs[key]; exists { + continue + } + } + if !attrS.Type.Equals(cty.String) { + continue + } + + log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key) + rawVal, err := input.Input(ctx, &InputOpts{ + Id: key, + Query: key, + Description: attrS.Description, + }) + if err != nil { + log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err) + continue + } + + vals[key] = cty.StringVal(rawVal) + } + + c.providerInputConfig[pk] = vals + log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals) + } + } + + return diags +} + +// schemaForInputSniffing returns a transformed version of a given schema +// that marks all attributes as optional, which the Context.Input method can +// use to detect whether a required argument is set without missing arguments +// themselves generating errors.
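Before the implementation, a sketch of why the sniffing schema must be all-optional: PartialContent reports an error for an absent required attribute, which would defeat the "is it set at all?" check. This sketch lives in package terraform (hcl is the vendored hcl2 package) and uses schemaForInputSniffing as defined next:

// sniffRegionSet reports whether a "region" argument appears in the body
// at all, without erroring when it is absent.
func sniffRegionSet(body hcl.Body) bool {
	strict := &hcl.BodySchema{
		Attributes: []hcl.AttributeSchema{{Name: "region", Required: true}},
	}
	// schemaForInputSniffing clears Required on every attribute, so an
	// unset "region" simply doesn't appear in the content.
	content, _, _ := body.PartialContent(schemaForInputSniffing(strict))
	_, isSet := content.Attributes["region"]
	return isSet
}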
+func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), + Blocks: schema.Blocks, + } + + for i, attrS := range schema.Attributes { + ret.Attributes[i] = attrS + ret.Attributes[i].Required = false + } + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go deleted file mode 100644 index 265339f636..0000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/debug.go +++ /dev/null @@ -1,523 +0,0 @@ -package terraform - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "sync" - "time" -) - -// DebugInfo is the global handler for writing the debug archive. All methods -// are safe to call concurrently. Setting DebugInfo to nil will disable writing -// the debug archive. All methods are safe to call on the nil value. -var dbug *debugInfo - -// SetDebugInfo initializes the debug handler with a backing file in the -// provided directory. This must be called before any other terraform package -// operations or not at all. Once his is called, CloseDebugInfo should be -// called before program exit. -func SetDebugInfo(path string) error { - if os.Getenv("TF_DEBUG") == "" { - return nil - } - - di, err := newDebugInfoFile(path) - if err != nil { - return err - } - - dbug = di - return nil -} - -// CloseDebugInfo is the exported interface to Close the debug info handler. -// The debug handler needs to be closed before program exit, so we export this -// function to be deferred in the appropriate entrypoint for our executable. -func CloseDebugInfo() error { - return dbug.Close() -} - -// newDebugInfoFile initializes the global debug handler with a backing file in -// the provided directory. -func newDebugInfoFile(dir string) (*debugInfo, error) { - err := os.MkdirAll(dir, 0755) - if err != nil { - return nil, err - } - - // FIXME: not guaranteed unique, but good enough for now - name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999")) - archivePath := filepath.Join(dir, name+".tar.gz") - - f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) - if err != nil { - return nil, err - } - return newDebugInfo(name, f) -} - -// newDebugInfo initializes the global debug handler. -func newDebugInfo(name string, w io.Writer) (*debugInfo, error) { - gz := gzip.NewWriter(w) - - d := &debugInfo{ - name: name, - w: w, - gz: gz, - tar: tar.NewWriter(gz), - } - - // create the subdirs we need - topHdr := &tar.Header{ - Name: name, - Typeflag: tar.TypeDir, - Mode: 0755, - } - graphsHdr := &tar.Header{ - Name: name + "/graphs", - Typeflag: tar.TypeDir, - Mode: 0755, - } - err := d.tar.WriteHeader(topHdr) - // if the first errors, the second will too - err = d.tar.WriteHeader(graphsHdr) - if err != nil { - return nil, err - } - - return d, nil -} - -// debugInfo provides various methods for writing debug information to a -// central archive. The debugInfo struct should be initialized once before any -// output is written, and Close should be called before program exit. All -// exported methods on debugInfo will be safe for concurrent use. The exported -// methods are also all safe to call on a nil pointer, so that there is no need -// for conditional blocks before writing debug information. 
-// -// Each write operation done by the debugInfo will flush the gzip.Writer and -// tar.Writer, and call Sync() or Flush() on the output writer as needed. This -// ensures that as much data as possible is written to storage in the event of -// a crash. The append format of the tar file, and the stream format of the -// gzip writer allow easy recovery f the data in the event that the debugInfo -// is not closed before program exit. -type debugInfo struct { - sync.Mutex - - // archive root directory name - name string - - // current operation phase - phase string - - // step is monotonic counter for for recording the order of operations - step int - - // flag to protect Close() - closed bool - - // the debug log output is in a tar.gz format, written to the io.Writer w - w io.Writer - gz *gzip.Writer - tar *tar.Writer -} - -// Set the name of the current operational phase in the debug handler. Each file -// in the archive will contain the name of the phase in which it was created, -// i.e. "input", "apply", "plan", "refresh", "validate" -func (d *debugInfo) SetPhase(phase string) { - if d == nil { - return - } - d.Lock() - defer d.Unlock() - - d.phase = phase -} - -// Close the debugInfo, finalizing the data in storage. This closes the -// tar.Writer, the gzip.Wrtier, and if the output writer is an io.Closer, it is -// also closed. -func (d *debugInfo) Close() error { - if d == nil { - return nil - } - - d.Lock() - defer d.Unlock() - - if d.closed { - return nil - } - d.closed = true - - d.tar.Close() - d.gz.Close() - - if c, ok := d.w.(io.Closer); ok { - return c.Close() - } - return nil -} - -// debug buffer is an io.WriteCloser that will write itself to the debug -// archive when closed. -type debugBuffer struct { - debugInfo *debugInfo - name string - buf bytes.Buffer -} - -func (b *debugBuffer) Write(d []byte) (int, error) { - return b.buf.Write(d) -} - -func (b *debugBuffer) Close() error { - return b.debugInfo.WriteFile(b.name, b.buf.Bytes()) -} - -// ioutils only has a noop ReadCloser -type nopWriteCloser struct{} - -func (nopWriteCloser) Write([]byte) (int, error) { return 0, nil } -func (nopWriteCloser) Close() error { return nil } - -// NewFileWriter returns an io.WriteClose that will be buffered and written to -// the debug archive when closed. -func (d *debugInfo) NewFileWriter(name string) io.WriteCloser { - if d == nil { - return nopWriteCloser{} - } - - return &debugBuffer{ - debugInfo: d, - name: name, - } -} - -type syncer interface { - Sync() error -} - -type flusher interface { - Flush() error -} - -// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called -// on the output writer if they are available. -func (d *debugInfo) flush() { - d.tar.Flush() - d.gz.Flush() - - if f, ok := d.w.(flusher); ok { - f.Flush() - } - - if s, ok := d.w.(syncer); ok { - s.Sync() - } -} - -// WriteFile writes data as a single file to the debug arhive. 
-func (d *debugInfo) WriteFile(name string, data []byte) error { - if d == nil { - return nil - } - - d.Lock() - defer d.Unlock() - return d.writeFile(name, data) -} - -func (d *debugInfo) writeFile(name string, data []byte) error { - defer d.flush() - path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name) - d.step++ - - hdr := &tar.Header{ - Name: path, - Mode: 0644, - Size: int64(len(data)), - } - err := d.tar.WriteHeader(hdr) - if err != nil { - return err - } - - _, err = d.tar.Write(data) - return err -} - -// DebugHook implements all methods of the terraform.Hook interface, and writes -// the arguments to a file in the archive. When a suitable format for the -// argument isn't available, the argument is encoded using json.Marshal. If the -// debug handler is nil, all DebugHook methods are noop, so no time is spent in -// marshaling the data structures. -type DebugHook struct{} - -func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - if is != nil { - buf.WriteString(is.String() + "\n") - } - - idCopy, err := id.Copy() - if err != nil { - return HookActionContinue, err - } - js, err := json.MarshalIndent(idCopy, "", " ") - if err != nil { - return HookActionContinue, err - } - buf.Write(js) - - dbug.WriteFile("hook-PreApply", buf.Bytes()) - - return HookActionContinue, nil -} - -func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - if is != nil { - buf.WriteString(is.String() + "\n") - } - - if err != nil { - buf.WriteString(err.Error()) - } - - dbug.WriteFile("hook-PostApply", buf.Bytes()) - - return HookActionContinue, nil -} - -func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - if is != nil { - buf.WriteString(is.String()) - buf.WriteString("\n") - } - dbug.WriteFile("hook-PreDiff", buf.Bytes()) - - return HookActionContinue, nil -} - -func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - idCopy, err := id.Copy() - if err != nil { - return HookActionContinue, err - } - js, err := json.MarshalIndent(idCopy, "", " ") - if err != nil { - return HookActionContinue, err - } - buf.Write(js) - - dbug.WriteFile("hook-PostDiff", buf.Bytes()) - - return HookActionContinue, nil -} - -func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - if is != nil { - buf.WriteString(is.String()) - buf.WriteString("\n") - } - dbug.WriteFile("hook-PreProvisionResource", buf.Bytes()) - - return HookActionContinue, nil -} - -func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId()) - buf.WriteString("\n") - } - - if is 
!= nil { - buf.WriteString(is.String()) - buf.WriteString("\n") - } - dbug.WriteFile("hook-PostProvisionResource", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId()) - buf.WriteString("\n") - } - buf.WriteString(s + "\n") - - dbug.WriteFile("hook-PreProvision", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - buf.WriteString(s + "\n") - - dbug.WriteFile("hook-PostProvision", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) { - if dbug == nil { - return - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId()) - buf.WriteString("\n") - } - buf.WriteString(s1 + "\n") - buf.WriteString(s2 + "\n") - - dbug.WriteFile("hook-ProvisionOutput", buf.Bytes()) -} - -func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - if is != nil { - buf.WriteString(is.String()) - buf.WriteString("\n") - } - dbug.WriteFile("hook-PreRefresh", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId()) - buf.WriteString("\n") - } - - if is != nil { - buf.WriteString(is.String()) - buf.WriteString("\n") - } - dbug.WriteFile("hook-PostRefresh", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - if ii != nil { - buf.WriteString(ii.HumanId()) - buf.WriteString("\n") - } - buf.WriteString(s + "\n") - - dbug.WriteFile("hook-PreImportState", buf.Bytes()) - return HookActionContinue, nil -} - -func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) { - if dbug == nil { - return HookActionContinue, nil - } - - var buf bytes.Buffer - - if ii != nil { - buf.WriteString(ii.HumanId() + "\n") - } - - for _, is := range iss { - if is != nil { - buf.WriteString(is.String() + "\n") - } - } - dbug.WriteFile("hook-PostImportState", buf.Bytes()) - return HookActionContinue, nil -} - -// skip logging this for now, since it could be huge -func (*DebugHook) PostStateUpdate(*State) (HookAction, error) { - return HookActionContinue, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go index d6dc550616..7a6ef3d328 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go @@ -4,12 +4,20 @@ import ( "bufio" "bytes" "fmt" + "log" "reflect" "regexp" "sort" + "strconv" "strings" "sync" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/configs/configschema" + 
"github.com/zclconf/go-cty/cty" + "github.com/mitchellh/copystructure" ) @@ -69,8 +77,24 @@ func (d *Diff) Prune() { // // This should be the preferred method to add module diffs since it // allows us to optimize lookups later as well as control sorting. -func (d *Diff) AddModule(path []string) *ModuleDiff { - m := &ModuleDiff{Path: path} +func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + legacyPath := make([]string, len(path)) + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("diff cannot represent modules with count or for_each keys") + } + + legacyPath[i] = step.Name + } + + m := &ModuleDiff{Path: legacyPath} m.init() d.Modules = append(d.Modules, m) return m @@ -79,7 +103,7 @@ func (d *Diff) AddModule(path []string) *ModuleDiff { // ModuleByPath is used to lookup the module diff for the given path. // This should be the preferred lookup mechanism as it allows for future // lookup optimizations. -func (d *Diff) ModuleByPath(path []string) *ModuleDiff { +func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff { if d == nil { return nil } @@ -87,7 +111,8 @@ func (d *Diff) ModuleByPath(path []string) *ModuleDiff { if mod.Path == nil { panic("missing module path") } - if reflect.DeepEqual(mod.Path, path) { + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { return mod } } @@ -96,7 +121,7 @@ func (d *Diff) ModuleByPath(path []string) *ModuleDiff { // RootModule returns the ModuleState for the root module func (d *Diff) RootModule() *ModuleDiff { - root := d.ModuleByPath(rootModulePath) + root := d.ModuleByPath(addrs.RootModuleInstance) if root == nil { panic("missing root module") } @@ -166,7 +191,8 @@ func (d *Diff) String() string { keys := make([]string, 0, len(d.Modules)) lookup := make(map[string]*ModuleDiff) for _, m := range d.Modules { - key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], ".")) + addr := normalizeModulePath(m.Path) + key := addr.String() keys = append(keys, key) lookup[key] = m } @@ -384,6 +410,541 @@ type InstanceDiff struct { func (d *InstanceDiff) Lock() { d.mu.Lock() } func (d *InstanceDiff) Unlock() { d.mu.Unlock() } +// ApplyToValue merges the receiver into the given base value, returning a +// new value that incorporates the planned changes. The given value must +// conform to the given schema, or this method will panic. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { + // Create an InstanceState attributes from our existing state. + // We can use this to more easily apply the diff changes. 
+ attrs := hcl2shim.FlatmapValueFromHCL2(base) + applied, err := d.Apply(attrs, schema) + if err != nil { + return base, err + } + + val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) + if err != nil { + return base, err + } + + return schema.CoerceValue(val) +} + +// Apply applies the diff to the provided flatmapped attributes, +// returning the new instance attributes. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + // We always build a new value here, even if the given diff is "empty", + // because we might be planning to create a new instance that happens + // to have no attributes set, and so we want to produce an empty object + // rather than just echoing back the null old value. + if attrs == nil { + attrs = map[string]string{} + } + + // Rather than applying the diff to mutate the attrs, we'll copy new values into + // here to avoid the possibility of leaving stale values. + result := map[string]string{} + + if d.Destroy || d.DestroyDeposed || d.DestroyTainted { + return result, nil + } + + return d.applyBlockDiff(nil, attrs, schema) +} + +func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + result := map[string]string{} + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + // localPrefix is used to build the local result map + localPrefix := "" + if name != "" { + localPrefix = name + "." + } + + // iterate over the schema rather than the attributes, so we can handle + // different block types separately from plain attributes + for n, attrSchema := range schema.Attributes { + var err error + newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) + + if err != nil { + return result, err + } + + for k, v := range newAttrs { + result[localPrefix+k] = v + } + } + + blockPrefix := strings.Join(path, ".") + if blockPrefix != "" { + blockPrefix += "." + } + for n, block := range schema.BlockTypes { + // we need to find the set of all keys that traverse this block + candidateKeys := map[string]bool{} + blockKey := blockPrefix + n + "." + localBlockPrefix := localPrefix + n + "." + + // we can only trust the diff for sets, since the path changes, so don't + // count existing values as candidate keys. If it turns out we're + // keeping the attributes, we will catch it down below with "keepBlock" + // after we check the set count. + if block.Nesting != configschema.NestingSet { + for k := range attrs { + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + } + + for k, diff := range d.Attributes { + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + + if diff.NewRemoved { + continue + } + + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + + // check each set candidate to see if it was removed. + // we need to do this, because when entire sets are removed, they may + // have the wrong key, and only show diffs going to "" + if block.Nesting == configschema.NestingSet { + for k := range candidateKeys { + indexPrefix := strings.Join(append(path, n, k), ".") + "."
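Stepping back from the block-diff internals for a moment, the top-level contract of Apply is easiest to see in a tiny worked example (a sketch within package terraform, using the types in this file):

// shimExample applies a legacy flatmap diff {"name": "a" -> "b"} to a prior
// attribute map; the result is map[string]string{"name": "b"}.
func shimExample() (map[string]string, error) {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"name": {Type: cty.String, Optional: true},
		},
	}
	d := &InstanceDiff{
		Attributes: map[string]*ResourceAttrDiff{
			"name": {Old: "a", New: "b"},
		},
	}
	return d.Apply(map[string]string{"name": "a"}, schema)
}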
+ keep := false + // now check each set element to see if it's a new diff, or one + // that we're dropping. Since we're only applying the "New" + // portion of the set, we can ignore diffs that only contain "Old" + for attr, diff := range d.Attributes { + if !strings.HasPrefix(attr, indexPrefix) { + continue + } + + // check for empty "count" keys + if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { + continue + } + + // removed items don't count either + if diff.NewRemoved { + continue + } + + // this must be a diff to keep + keep = true + break + } + if !keep { + delete(candidateKeys, k) + } + } + } + + for k := range candidateKeys { + newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) + if err != nil { + return result, err + } + + for attr, v := range newAttrs { + result[localBlockPrefix+attr] = v + } + } + + keepBlock := true + // check this block's count diff directly first, since we may not + // have candidates because it was removed and only set to "0" + if diff, ok := d.Attributes[blockKey+"#"]; ok { + if diff.New == "0" || diff.NewRemoved { + keepBlock = false + } + } + + // if there was no diff at all, then we need to keep the block attributes + if len(candidateKeys) == 0 && keepBlock { + for k, v := range attrs { + if strings.HasPrefix(k, blockKey) { + // we need the key relative to this block, so remove the + // entire prefix, then re-insert the block name. + localKey := localBlockPrefix + k[len(blockKey):] + result[localKey] = v + } + } + } + + countAddr := strings.Join(append(path, n, "#"), ".") + if countDiff, ok := d.Attributes[countAddr]; ok { + if countDiff.NewComputed { + result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue + } else { + result[localBlockPrefix+"#"] = countDiff.New + + // While sets are complete, lists are not, and we may not have all the + // information to track removals. If the list was truncated, we need to + // remove the extra items from the result. + if block.Nesting == configschema.NestingList && + countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { + length, _ := strconv.Atoi(countDiff.New) + for k := range result { + if !strings.HasPrefix(k, localBlockPrefix) { + continue + } + + index := k[len(localBlockPrefix):] + nextDot := strings.Index(index, ".") + if nextDot < 1 { + continue + } + index = index[:nextDot] + i, err := strconv.Atoi(index) + if err != nil { + // this shouldn't happen since we added these + // ourselves, but make note of it just in case.
+ log.Printf("[ERROR] bad list index in %q: %s", k, err) + continue + } + if i >= length { + delete(result, k) + } + } + } + } + } else if origCount, ok := attrs[countAddr]; ok && keepBlock { + result[localBlockPrefix+"#"] = origCount + } else { + result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) + } + } + + return result, nil +} + +func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + ty := attrSchema.Type + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): + return d.applyCollectionDiff(path, attrs, attrSchema) + case ty.IsSetType(): + return d.applySetDiff(path, attrs, attrSchema) + default: + return d.applySingleAttrDiff(path, attrs, attrSchema) + } +} + +func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + currentKey := strings.Join(path, ".") + + attr := path[len(path)-1] + + result := map[string]string{} + diff := d.Attributes[currentKey] + old, exists := attrs[currentKey] + + if diff != nil && diff.NewComputed { + result[attr] = config.UnknownVariableValue + return result, nil + } + + // "id" must exist and not be an empty string, or it must be unknown. + // This only applied to top-level "id" fields. + if attr == "id" && len(path) == 1 { + if old == "" { + result[attr] = config.UnknownVariableValue + } else { + result[attr] = old + } + return result, nil + } + + // attribute diffs are sometimes missed, so assume no diff means keep the + // old value + if diff == nil { + if exists { + result[attr] = old + } else { + // We need required values, so set those with an empty value. It + // must be set in the config, since if it were missing it would have + // failed validation. + if attrSchema.Required { + // we only set a missing string here, since bool or number types + // would have distinct zero value which shouldn't have been + // lost. + if attrSchema.Type == cty.String { + result[attr] = "" + } + } + } + return result, nil + } + + // check for missmatched diff values + if exists && + old != diff.Old && + old != config.UnknownVariableValue && + diff.Old != config.UnknownVariableValue { + return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) + } + + if diff.NewRemoved { + // don't set anything in the new value + return map[string]string{}, nil + } + + if diff.Old == diff.New && diff.New == "" { + // this can only be a valid empty string + if attrSchema.Type == cty.String { + result[attr] = "" + } + return result, nil + } + + if attrSchema.Computed && diff.NewComputed { + result[attr] = config.UnknownVariableValue + return result, nil + } + + result[attr] = diff.New + + return result, nil +} + +func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + result := map[string]string{} + + prefix := "" + if len(path) > 1 { + prefix = strings.Join(path[:len(path)-1], ".") + "." 
+ } + + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + currentKey := prefix + name + + // check the index first for special handling + for k, diff := range d.Attributes { + // check the index value, which can be set, and 0 + if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { + if diff.NewRemoved { + return result, nil + } + + if diff.NewComputed { + result[k[len(prefix):]] = config.UnknownVariableValue + return result, nil + } + + // do what the diff tells us to here, so that it's consistent with applies + if diff.New == "0" { + result[k[len(prefix):]] = "0" + return result, nil + } + } + } + + // collect all the keys from the diff and the old state + noDiff := true + keys := map[string]bool{} + for k := range d.Attributes { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noDiff = false + keys[k] = true + } + + noAttrs := true + for k := range attrs { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noAttrs = false + keys[k] = true + } + + // If there's no diff and no attrs, then there's no value at all. + // This prevents an unexpected zero-count attribute in the attributes. + if noDiff && noAttrs { + return result, nil + } + + idx := "#" + if attrSchema.Type.IsMapType() { + idx = "%" + } + + for k := range keys { + // generate a schema placeholder for the values + elSchema := &configschema.Attribute{ + Type: attrSchema.Type.ElementType(), + } + + res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) + if err != nil { + return result, err + } + + for k, v := range res { + result[name+"."+k] = v + } + } + + // Just like in nested list blocks, for simple lists we may need to fill in + // missing empty strings. + countKey := name + "." + idx + count := result[countKey] + length, _ := strconv.Atoi(count) + + if count != "" && count != hcl2shim.UnknownVariableValue && + attrSchema.Type.Equals(cty.List(cty.String)) { + // insert empty strings into missing indexes + for i := 0; i < length; i++ { + key := fmt.Sprintf("%s.%d", name, i) + if _, ok := result[key]; !ok { + result[key] = "" + } + } + } + + // now check for truncation in any type of list + if attrSchema.Type.IsListType() { + for key := range result { + if key == countKey { + continue + } + + if len(key) <= len(name)+1 { + // not sure what this is, but don't panic + continue + } + + index := key[len(name)+1:] + + // It is possible to have nested sets or maps, so look for another dot + dot := strings.Index(index, ".") + if dot > 0 { + index = index[:dot] + } + + // This shouldn't have any more dots, since the element type is only string. + num, err := strconv.Atoi(index) + if err != nil { + log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) + continue + } + + if num >= length { + delete(result, key) + } + } + } + + // Fill in the count value if it wasn't present in the diff for some reason, + // or if there is no count at all. + _, countDiff := d.Attributes[countKey] + if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { + result[countKey] = countFlatmapContainerValues(countKey, result) + } + + return result, nil +} + +func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + // We only need this special behavior for sets of objects. + if !attrSchema.Type.ElementType().IsObjectType() { + // The normal collection apply behavior will work okay for this one, then.
+ return d.applyCollectionDiff(path, attrs, attrSchema) + } + + // When we're dealing with a set of an object type we actually want to + // use our normal _block type_ apply behaviors, so we'll construct ourselves + // a synthetic schema that treats the object type as a block type and + // then delegate to our block apply method. + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + } + + for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { + // We can safely make everything into an attribute here because in the + // event that there are nested set attributes we'll end up back in + // here again recursively and can then deal with the next level of + // expansion. + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: ty, + Optional: true, + } + } + + parentPath := path[:len(path)-1] + childName := path[len(path)-1] + containerSchema := &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + childName: { + Nesting: configschema.NestingSet, + Block: *synthSchema, + }, + }, + } + + return d.applyBlockDiff(parentPath, attrs, containerSchema) +} + +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". +func countFlatmapContainerValues(key string, attrs map[string]string) string { + if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + panic(fmt.Sprintf("invalid index value %q", key)) + } + + prefix := key[:len(key)-1] + items := map[string]int{} + + for k := range attrs { + if k == key { + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + + suffix := k[len(prefix):] + dot := strings.Index(suffix, ".") + if dot > 0 { + suffix = suffix[:dot] + } + + items[suffix]++ + } + return strconv.Itoa(len(items)) +} + // ResourceAttrDiff is the diff of a single attribute of a resource. 
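Note that countFlatmapContainerValues counts distinct first-level child keys rather than raw flatmap entries, so a nested object contributes one element no matter how many of its attributes appear. A standalone copy of the counting core, with a usage example:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// countContainer restates the counting logic above for illustration: it
// counts the distinct first-level suffixes under key's prefix.
func countContainer(key string, attrs map[string]string) string {
	prefix := key[:len(key)-1] // strip the "#" or "%", keep the dot
	items := map[string]int{}
	for k := range attrs {
		if k == key || !strings.HasPrefix(k, prefix) {
			continue
		}
		suffix := k[len(prefix):]
		if dot := strings.Index(suffix, "."); dot > 0 {
			suffix = suffix[:dot] // nested keys collapse into one element
		}
		items[suffix]++
	}
	return strconv.Itoa(len(items))
}

func main() {
	attrs := map[string]string{
		"rule.0.port": "80",
		"rule.0.cidr": "0.0.0.0/0",
		"rule.1.port": "443",
	}
	fmt.Println(countContainer("rule.#", attrs)) // 2, not 3
}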
type ResourceAttrDiff struct { Old string // Old Value diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go index 10d9c228b4..48ed3533ac 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go @@ -2,7 +2,8 @@ package terraform import ( "log" - "strings" + + "github.com/hashicorp/terraform/tfdiags" ) // EvalNode is the interface that must be implemented by graph nodes to @@ -46,15 +47,21 @@ func Eval(n EvalNode, ctx EvalContext) (interface{}, error) { func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) { path := "unknown" if ctx != nil { - path = strings.Join(ctx.Path(), ".") + path = ctx.Path().String() + } + if path == "" { + path = "<root>" } log.Printf("[TRACE] %s: eval: %T", path, n) output, err := n.Eval(ctx) if err != nil { - if _, ok := err.(EvalEarlyExitError); ok { - log.Printf("[TRACE] %s: eval: %T, err: %s", path, n, err) - } else { + switch err.(type) { + case EvalEarlyExitError: + log.Printf("[TRACE] %s: eval: %T, early exit err: %s", path, n, err) + case tfdiags.NonFatalError: + log.Printf("[WARN] %s: eval: %T, non-fatal err: %s", path, n, err) + default: log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err) } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go index b9b4806462..09313f7fc8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go @@ -3,119 +3,316 @@ package terraform import ( "fmt" "log" - "strconv" + "strings" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/objchange" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // EvalApply is an EvalNode implementation that writes the diff to // the full diff. type EvalApply struct { - Info *InstanceInfo - State **InstanceState - Diff **InstanceDiff - Provider *ResourceProvider - Output **InstanceState - CreateNew *bool - Error *error + Addr addrs.ResourceInstance + Config *configs.Resource + Dependencies []addrs.Referenceable + State **states.ResourceInstanceObject + Change **plans.ResourceInstanceChange + ProviderAddr addrs.AbsProviderConfig + Provider *providers.Interface + ProviderSchema **ProviderSchema + Output **states.ResourceInstanceObject + CreateNew *bool + Error *error } // TODO: test func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { - diff := *n.Diff + var diags tfdiags.Diagnostics + + change := *n.Change provider := *n.Provider state := *n.State + absAddr := n.Addr.Absolute(ctx.Path()) - // If we have no diff, we have nothing to do!
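The eval.go hunk above replaces a single type assertion with a type switch so that non-fatal diagnostics log at [WARN] instead of [ERROR]. The pattern in isolation, with hypothetical stand-ins for EvalEarlyExitError and tfdiags.NonFatalError:

package main

import (
	"errors"
	"log"
)

// earlyExitError and nonFatalError stand in for terraform's
// EvalEarlyExitError and tfdiags.NonFatalError respectively.
type earlyExitError struct{}

func (earlyExitError) Error() string { return "early exit" }

type nonFatalError struct{ err error }

func (e nonFatalError) Error() string { return "non-fatal: " + e.err.Error() }

// logEvalErr picks a log level by error type, as in EvalRaw above:
// expected control-flow errors trace, recoverable ones warn, the rest error.
func logEvalErr(path string, err error) {
	switch err.(type) {
	case earlyExitError:
		log.Printf("[TRACE] %s: early exit err: %s", path, err)
	case nonFatalError:
		log.Printf("[WARN] %s: non-fatal err: %s", path, err)
	default:
		log.Printf("[ERROR] %s: err: %s", path, err)
	}
}

func main() {
	logEvalErr("module.a", nonFatalError{errors.New("partial apply")})
	logEvalErr("module.a", errors.New("plain failure"))
}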
- if diff.Empty() { - log.Printf( - "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id) - return nil, nil + if state == nil { + state = &states.ResourceInstanceObject{} + } + + schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + + if n.CreateNew != nil { + *n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace()) } - // Remove any output values from the diff - for k, ad := range diff.CopyAttributes() { - if ad.Type == DiffAttrOutput { - diff.DelAttribute(k) + configVal := cty.NullVal(cty.DynamicPseudoType) + if n.Config != nil { + var configDiags tfdiags.Diagnostics + keyData := EvalDataForInstanceKey(n.Addr.Key) + configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() } } - // If the state is nil, make it non-nil - if state == nil { - state = new(InstanceState) + log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr.Absolute(ctx.Path()), change.Action) + resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + PriorState: change.Before, + Config: configVal, + PlannedState: change.After, + PlannedPrivate: change.Private, + }) + applyDiags := resp.Diagnostics + if n.Config != nil { + applyDiags = applyDiags.InConfigBody(n.Config.Config) } - state.init() + diags = diags.Append(applyDiags) + + // Even if there are errors in the returned diagnostics, the provider may + // have returned a _partial_ state for an object that already exists but + // failed to fully configure, and so the remaining code must always run + // to completion but must be defensive against the new value being + // incomplete. + newVal := resp.NewState + + if newVal == cty.NilVal { + // Providers are supposed to return a partial new value even when errors + // occur, but sometimes they don't and so in that case we'll patch that up + // by just using the prior state, so we'll at least keep track of the + // object for the user to retry. + newVal = change.Before + + // As a special case, we'll set the new value to null if it looks like + // we were trying to execute a delete, because the provider in this case + // probably left the newVal unset intending it to be interpreted as "null". + if change.After.IsNull() { + newVal = cty.NullVal(schema.ImpliedType()) + } - // Flag if we're creating a new instance - if n.CreateNew != nil { - *n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew() + // Ideally we'd produce an error or warning here if newVal is nil and + // there are no errors in diags, because that indicates a buggy + // provider not properly reporting its result, but unfortunately many + // of our historical test mocks behave in this way and so producing + // a diagnostic here fails hundreds of tests. Instead, we must just + // silently retain the old value for now. Returning a nil value with + // no errors is still always considered a bug in the provider though, + // and should be fixed for any "real" providers that do it. } - // With the completed diff, apply! 
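The nil-result fallback above can be stated compactly with go-cty values: a nil response plus a planned delete is read as null, while a nil response otherwise keeps the prior value. A sketch under those rules (pickNewVal is an illustrative helper, not SDK API):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// pickNewVal mirrors the fallback above: if the provider returned no new
// value, fall back to the prior value, except that a planned delete is
// interpreted as "the object is now null".
func pickNewVal(resp, prior, planned cty.Value, schemaTy cty.Type) cty.Value {
	if resp != cty.NilVal {
		return resp
	}
	if planned.IsNull() {
		return cty.NullVal(schemaTy) // provider meant "deleted"
	}
	return prior // keep tracking the object so the user can retry
}

func main() {
	ty := cty.Object(map[string]cty.Type{"id": cty.String})
	prior := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-123")})
	got := pickNewVal(cty.NilVal, prior, cty.NullVal(ty), ty)
	fmt.Println(got.IsNull()) // true: delete plan + nil response => null
}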
- log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id) - state, err := provider.Apply(n.Info, state, diff) - if state == nil { - state = new(InstanceState) + var conformDiags tfdiags.Diagnostics + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + conformDiags = conformDiags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid value after apply for %s. The result cannot not be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + diags = diags.Append(conformDiags) + if conformDiags.HasErrors() { + // Bail early in this particular case, because an object that doesn't + // conform to the schema can't be saved in the state anyway -- the + // serializer will reject it. + return nil, diags.Err() + } + + // After this point we have a type-conforming result object and so we + // must always run to completion to ensure it can be saved. If n.Error + // is set then we must not return a non-nil error, in order to allow + // evaluation to continue to a later point where our state object will + // be saved. + + // By this point there must not be any unknown values remaining in our + // object, because we've applied the change and we can't save unknowns + // in our persistent state. If any are present then we will indicate an + // error (which is always a bug in the provider) but we will also replace + // them with nulls so that we can successfully save the portions of the + // returned value that are known. + if !newVal.IsWhollyKnown() { + // To generate better error messages, we'll go for a walk through the + // value and make a separate diagnostic for each unknown value we + // find. + cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) { + if !val.IsKnown() { + pathStr := tfdiags.FormatCtyPath(path) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.", + n.Addr.Absolute(ctx.Path()), pathStr, + ), + )) + } + return true, nil + }) + + // NOTE: This operation can potentially be lossy if there are multiple + // elements in a set that differ only by unknown values: after + // replacing with null these will be merged together into a single set + // element. Since we can only get here in the presence of a provider + // bug, we accept this because storing a result here is always a + // best-effort sort of thing. + newVal = cty.UnknownAsNull(newVal) + } + + if change.Action != plans.Delete && !diags.HasErrors() { + // Only values that were marked as unknown in the planned value are allowed + // to change during the apply operation. (We do this after the unknown-ness + // check above so that we also catch anything that became unknown after + // being known during plan.) + // + // If we are returning other errors anyway then we'll give this + // a pass since the other errors are usually the explanation for + // this one and so it's more helpful to let the user focus on the + // root cause rather than distract with this extra problem. 
+ if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. + var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + + // The sort of inconsistency we won't catch here is if a known value + // in the plan is changed during apply. That can cause downstream + // problems because a dependent resource would make its own plan based + // on the planned value, and thus get a different result during the + // apply phase. This will usually lead to a "Provider produced invalid plan" + // error that incorrectly blames the downstream resource for the change. + + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent result after apply", + fmt.Sprintf( + "When applying changes to %s, provider %q produced an unexpected new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), + ), + )) + } + } + } + } + + // If a provider returns a null or non-null object at the wrong time then + // we still want to save that but it often causes some confusing behaviors + // where it seems like Terraform is failing to take any action at all, + // so we'll generate some errors to draw attention to it. + if !diags.HasErrors() { + if change.Action == plans.Delete && !newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.", + change.Action, n.Addr.Absolute(ctx.Path()), + ), + )) + } + if change.Action != plans.Delete && newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.", + change.Action, n.Addr.Absolute(ctx.Path()), + ), + )) + } } - state.init() - // Force the "id" attribute to be our ID - if state.ID != "" { - state.Attributes["id"] = state.ID + // Sometimes providers return a null value when an operation fails for some + // reason, but we'd rather keep the prior state so that the error can be + // corrected on a subsequent run. 
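The consistency rule enforced above is that only attributes that were unknown in the planned value may legally differ after apply. A much-simplified, shallow version of that check (assertCompatible is illustrative; the real objchange.AssertObjectCompatible also descends into collections and nested types):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// assertCompatible reports attributes whose planned value was known but
// changed during apply. Unknown-in-plan attributes are free to change.
func assertCompatible(planned, actual cty.Value) []string {
	var errs []string
	plannedAttrs := planned.AsValueMap()
	actualAttrs := actual.AsValueMap()
	for name, p := range plannedAttrs {
		if !p.IsKnown() {
			continue // unknown in plan: any final value is acceptable
		}
		if a, ok := actualAttrs[name]; ok && !p.RawEquals(a) {
			errs = append(errs, fmt.Sprintf(".%s: was %#v, now %#v", name, p, a))
		}
	}
	return errs
}

func main() {
	planned := cty.ObjectVal(map[string]cty.Value{
		"ami": cty.StringVal("ami-1"),
		"id":  cty.UnknownVal(cty.String),
	})
	actual := cty.ObjectVal(map[string]cty.Value{
		"ami": cty.StringVal("ami-2"), // changed despite being known in plan
		"id":  cty.StringVal("i-123"), // fine: was unknown
	})
	fmt.Println(assertCompatible(planned, actual))
}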
We must only do this for null new value + // though, or else we may discard partial updates the provider was able to + // complete. + if diags.HasErrors() && newVal.IsNull() { + // Otherwise, we'll continue but using the prior state as the new value, + // making this effectively a no-op. If the item really _has_ been + // deleted then our next refresh will detect that and fix it up. + // If change.Action is Create then change.Before will also be null, + // which is fine. + newVal = change.Before } - // If the value is the unknown variable value, then it is an error. - // In this case we record the error and remove it from the state - for ak, av := range state.Attributes { - if av == config.UnknownVariableValue { - err = multierror.Append(err, fmt.Errorf( - "Attribute with unknown value: %s", ak)) - delete(state.Attributes, ak) + var newState *states.ResourceInstanceObject + if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case + newState = &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: newVal, + Private: resp.Private, + Dependencies: n.Dependencies, // Should be populated by the caller from the StateDependencies method on the resource instance node } } // Write the final state if n.Output != nil { - *n.Output = state + *n.Output = newState } - // If there are no errors, then we append it to our output error - // if we have one, otherwise we just output it. - if err != nil { + if diags.HasErrors() { + // If the caller provided an error pointer then they are expected to + // handle the error some other way and we treat our own result as + // success. if n.Error != nil { - helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error()) - *n.Error = multierror.Append(*n.Error, helpfulErr) - } else { - return nil, err + err := diags.Err() + *n.Error = err + log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err) + return nil, nil } } - return nil, nil + return nil, diags.ErrWithWarnings() } // EvalApplyPre is an EvalNode implementation that does the pre-Apply work type EvalApplyPre struct { - Info *InstanceInfo - State **InstanceState - Diff **InstanceDiff + Addr addrs.ResourceInstance + Gen states.Generation + State **states.ResourceInstanceObject + Change **plans.ResourceInstanceChange } // TODO: test func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - diff := *n.Diff + change := *n.Change + absAddr := n.Addr.Absolute(ctx.Path()) - // If the state is nil, make it non-nil - if state == nil { - state = new(InstanceState) + if change == nil { + panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr)) } - state.init() - if resourceHasUserVisibleApply(n.Info) { - // Call post-apply hook + if resourceHasUserVisibleApply(n.Addr) { + priorState := change.Before + plannedNewState := change.After + err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(n.Info, state, diff) + return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState) }) if err != nil { return nil, err @@ -127,8 +324,9 @@ func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { // EvalApplyPost is an EvalNode implementation that does the post-Apply work type EvalApplyPost struct { - Info *InstanceInfo - State **InstanceState + Addr addrs.ResourceInstance + Gen states.Generation + State **states.ResourceInstanceObject Error *error } @@ -136,33 +334,93 @@ type 
EvalApplyPost struct { func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { state := *n.State - if resourceHasUserVisibleApply(n.Info) { - // Call post-apply hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(n.Info, state, *n.Error) + if resourceHasUserVisibleApply(n.Addr) { + absAddr := n.Addr.Absolute(ctx.Path()) + var newState cty.Value + if state != nil { + newState = state.Value + } else { + newState = cty.NullVal(cty.DynamicPseudoType) + } + var err error + if n.Error != nil { + err = *n.Error + } + + hookErr := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostApply(absAddr, n.Gen, newState, err) }) - if err != nil { - return nil, err + if hookErr != nil { + return nil, hookErr } } return nil, *n.Error } +// EvalMaybeTainted is an EvalNode that takes the planned change, new value, +// and possible error from an apply operation and produces a new instance +// object marked as tainted if it appears that a create operation has failed. +// +// This EvalNode never returns an error, to ensure that a subsequent EvalNode +// can still record the possibly-tainted object in the state. +type EvalMaybeTainted struct { + Addr addrs.ResourceInstance + Gen states.Generation + Change **plans.ResourceInstanceChange + State **states.ResourceInstanceObject + Error *error + + // If StateOutput is not nil, its referent will be assigned either the same + // pointer as State or a new object with its status set as Tainted, + // depending on whether an error is given and if this was a create action. + StateOutput **states.ResourceInstanceObject +} + +// TODO: test +func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + change := *n.Change + err := *n.Error + + if state != nil && state.Status == states.ObjectTainted { + log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path())) + return nil, nil + } + + if n.StateOutput != nil { + if err != nil && change.Action == plans.Create { + // If there are errors during a _create_ then the object is + // in an undefined state, and so we'll mark it as tainted so + // we can try again on the next run. + // + // We don't do this for other change actions because errors + // during updates will often not change the remote object at all. + // If there _were_ changes prior to the error, it's the provider's + // responsibility to record the effect of those changes in the + // object value it returned. + log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path())) + *n.StateOutput = state.AsTainted() + } else { + *n.StateOutput = state + } + } + + return nil, nil +} + // resourceHasUserVisibleApply returns true if the given resource is one where // apply actions should be exposed to the user. // // Certain resources do apply actions only as an implementation detail, so // these should not be advertised to code outside of this package. -func resourceHasUserVisibleApply(info *InstanceInfo) bool { - addr := info.ResourceAddress() - +func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool { // Only managed resources have user-visible apply actions. // In particular, this excludes data resources since we "apply" these // only as an implementation detail of removing them from state when // they are destroyed. (When reading, they don't get here at all because // we present them as "Refresh" actions.) 
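The tainting rule above is deliberately narrow: only an error during a create taints, because a failed create leaves the remote object in an undefined state, while a failed update keeps whatever object value the provider reported. As a standalone predicate (types are stand-ins):

package main

import "fmt"

type action int

const (
	createAction action = iota
	updateAction
	deleteAction
)

// shouldTaint mirrors EvalMaybeTainted above: an error during a create
// marks the object for recreation on the next run; errors during other
// actions leave the reported object value alone.
func shouldTaint(a action, err error) bool {
	return err != nil && a == createAction
}

func main() {
	fmt.Println(shouldTaint(createAction, fmt.Errorf("boom"))) // true
	fmt.Println(shouldTaint(updateAction, fmt.Errorf("boom"))) // false
}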
- return addr.Mode == config.ManagedResourceMode + return addr.ContainingResource().Mode == addrs.ManagedResourceMode } // EvalApplyProvisioners is an EvalNode implementation that executes @@ -171,23 +429,33 @@ func resourceHasUserVisibleApply(info *InstanceInfo) bool { // TODO(mitchellh): This should probably be split up into a more fine-grained // ApplyProvisioner (single) that is looped over. type EvalApplyProvisioners struct { - Info *InstanceInfo - State **InstanceState - Resource *config.Resource - InterpResource *Resource + Addr addrs.ResourceInstance + State **states.ResourceInstanceObject + ResourceConfig *configs.Resource CreateNew *bool Error *error // When is the type of provisioner to run at this point - When config.ProvisionerWhen + When configs.ProvisionerWhen } // TODO: test func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) state := *n.State - - if n.CreateNew != nil && !*n.CreateNew { + if state == nil { + log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr) + return nil, nil + } + if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew { // If we're not creating a new resource, then don't run provisioners + log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr) + return nil, nil + } + if state.Status == states.ObjectTainted { + // No point in provisioning an object that is already tainted, since + // it's going to get recreated on the next apply anyway. + log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr) return nil, nil } @@ -197,14 +465,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { return nil, nil } - // taint tells us whether to enable tainting. - taint := n.When == config.ProvisionerWhenCreate - if n.Error != nil && *n.Error != nil { - if taint { - state.Tainted = true - } - // We're already tainted, so just return out return nil, nil } @@ -212,7 +473,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { { // Call pre hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionResource(n.Info, state) + return h.PreProvisionInstance(absAddr, state.Value) }) if err != nil { return nil, err @@ -223,18 +484,19 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { // if we have one, otherwise we just output it. err := n.apply(ctx, provs) if err != nil { - if taint { - state.Tainted = true - } - *n.Error = multierror.Append(*n.Error, err) - return nil, err + if n.Error == nil { + return nil, err + } else { + log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr) + return nil, nil + } } { // Call post hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionResource(n.Info, state) + return h.PostProvisionInstance(absAddr, state.Value) }) if err != nil { return nil, err @@ -246,18 +508,18 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { // filterProvisioners filters the provisioners on the resource to only // the provisioners specified by the "when" option. 
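Provisioner execution above is skipped in three distinct cases before any provisioner configuration is even evaluated. As a guard-clause sketch with stand-in types:

package main

import "fmt"

type objStatus int

const (
	objReady objStatus = iota
	objTainted
)

// skipProvisioners mirrors the guards above: nothing to provision without
// state, creation-time provisioners only run on freshly created objects,
// and tainted objects will be recreated on the next apply anyway.
func skipProvisioners(hasState bool, status objStatus, whenCreate, createdNew bool) (bool, string) {
	switch {
	case !hasState:
		return true, "no state"
	case whenCreate && !createdNew:
		return true, "not freshly created"
	case status == objTainted:
		return true, "tainted, will be recreated"
	}
	return false, ""
}

func main() {
	fmt.Println(skipProvisioners(true, objTainted, true, true)) // true tainted, will be recreated
}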
-func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner { +func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner { // Fast path the zero case - if n.Resource == nil { + if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil { return nil } - if len(n.Resource.Provisioners) == 0 { + if len(n.ResourceConfig.Managed.Provisioners) == 0 { return nil } - result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners)) - for _, p := range n.Resource.Provisioners { + result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners)) + for _, p := range n.ResourceConfig.Managed.Provisioners { if p.When == n.When { result = append(result, p) } @@ -266,64 +528,71 @@ func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner { return result } -func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error { - state := *n.State - - // Store the original connection info, restore later - origConnInfo := state.Ephemeral.ConnInfo - defer func() { - state.Ephemeral.ConnInfo = origConnInfo - }() +func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error { + var diags tfdiags.Diagnostics + instanceAddr := n.Addr + absAddr := instanceAddr.Absolute(ctx.Path()) + + // If there's a connection block defined directly inside the resource block + // then it'll serve as a base connection configuration for all of the + // provisioners. + var baseConn hcl.Body + if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil { + baseConn = n.ResourceConfig.Managed.Connection.Config + } for _, prov := range provs { + log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type) + // Get the provisioner provisioner := ctx.Provisioner(prov.Type) + schema := ctx.ProvisionerSchema(prov.Type) - // Interpolate the provisioner config - provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource) - if err != nil { - return err - } + keyData := EvalDataForInstanceKey(instanceAddr.Key) - // Interpolate the conn info, since it may contain variables - connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource) - if err != nil { - return err + // Evaluate the main provisioner configuration. + config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData) + diags = diags.Append(configDiags) + + // If the provisioner block contains a connection block of its own then + // it can override the base connection configuration, if any. + var localConn hcl.Body + if prov.Connection != nil { + localConn = prov.Connection.Config } - // Merge the connection information - overlay := make(map[string]string) - if origConnInfo != nil { - for k, v := range origConnInfo { - overlay[k] = v - } + var connBody hcl.Body + switch { + case baseConn != nil && localConn != nil: + // Our standard merging logic applies here, similar to what we do + // with _override.tf configuration files: arguments from the + // base connection block will be masked by any arguments of the + // same name in the local connection block. 
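The connection merging described above follows the same masking rule as _override.tf files: a local argument hides the base argument of the same name, and everything else passes through. Sketched here with plain maps (the real implementation merges hcl.Body values, not maps):

package main

import "fmt"

// mergeConn illustrates the masking semantics above: start from the
// resource-level connection block, then let the provisioner-level block
// override individual arguments.
func mergeConn(base, local map[string]string) map[string]string {
	merged := make(map[string]string, len(base)+len(local))
	for k, v := range base {
		merged[k] = v
	}
	for k, v := range local {
		merged[k] = v // same-name local argument masks the base one
	}
	return merged
}

func main() {
	base := map[string]string{"type": "ssh", "user": "root", "host": "10.0.0.1"}
	local := map[string]string{"user": "deploy"}
	fmt.Println(mergeConn(base, local)) // type=ssh, user=deploy, host=10.0.0.1
}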
+ connBody = configs.MergeBodies(baseConn, localConn) + case baseConn != nil: + connBody = baseConn + case localConn != nil: + connBody = localConn } - for k, v := range connInfo.Config { - switch vt := v.(type) { - case string: - overlay[k] = vt - case int64: - overlay[k] = strconv.FormatInt(vt, 10) - case int32: - overlay[k] = strconv.FormatInt(int64(vt), 10) - case int: - overlay[k] = strconv.FormatInt(int64(vt), 10) - case float32: - overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32) - case float64: - overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64) - case bool: - overlay[k] = strconv.FormatBool(vt) - default: - overlay[k] = fmt.Sprintf("%v", vt) + + // start with an empty connInfo + connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType()) + + if connBody != nil { + var connInfoDiags tfdiags.Diagnostics + connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData) + diags = diags.Append(connInfoDiags) + if diags.HasErrors() { + // "on failure continue" setting only applies to failures of the + // provisioner itself, not to invalid configuration. + return diags.Err() } } - state.Ephemeral.ConnInfo = overlay { // Call pre hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvision(n.Info, prov.Type) + return h.PreProvisionInstanceStep(absAddr, prov.Type) }) if err != nil { return err @@ -333,31 +602,37 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provision // The output function outputFn := func(msg string) { ctx.Hook(func(h Hook) (HookAction, error) { - h.ProvisionOutput(n.Info, prov.Type, msg) + h.ProvisionOutput(absAddr, prov.Type, msg) return HookActionContinue, nil }) } - // Invoke the Provisioner output := CallbackUIOutput{OutputFn: outputFn} - applyErr := provisioner.Apply(&output, state, provConfig) + resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: config, + Connection: connInfo, + UIOutput: &output, + }) + applyDiags := resp.Diagnostics.InConfigBody(prov.Config) // Call post hook hookErr := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvision(n.Info, prov.Type, applyErr) + return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err()) }) - // Handle the error before we deal with the hook - if applyErr != nil { - // Determine failure behavior - switch prov.OnFailure { - case config.ProvisionerOnFailureContinue: - log.Printf( - "[INFO] apply: %s [%s]: error during provision, continue requested", - n.Info.Id, prov.Type) - - case config.ProvisionerOnFailureFail: - return applyErr + switch prov.OnFailure { + case configs.ProvisionerOnFailureContinue: + if applyDiags.HasErrors() { + log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type) + } else { + // Maybe there are warnings that we still want to see + diags = diags.Append(applyDiags) + } + default: + diags = diags.Append(applyDiags) + if applyDiags.HasErrors() { + log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type) + return diags.Err() } } @@ -367,6 +642,5 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provision } } - return nil - + return diags.ErrWithWarnings() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go index 715e79e174..4dff0c84d1 100644 --- 
a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go @@ -3,33 +3,44 @@ package terraform import ( "fmt" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/plans" + + "github.com/hashicorp/hcl2/hcl" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/tfdiags" ) // EvalPreventDestroy is an EvalNode implementation that returns an // error if a resource has PreventDestroy configured and the diff // would destroy the resource. type EvalCheckPreventDestroy struct { - Resource *config.Resource - ResourceId string - Diff **InstanceDiff + Addr addrs.ResourceInstance + Config *configs.Resource + Change **plans.ResourceInstanceChange } func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { - if n.Diff == nil || *n.Diff == nil || n.Resource == nil { + if n.Change == nil || *n.Change == nil || n.Config == nil || n.Config.Managed == nil { return nil, nil } - diff := *n.Diff - preventDestroy := n.Resource.Lifecycle.PreventDestroy - - if diff.GetDestroy() && preventDestroy { - resourceId := n.ResourceId - if resourceId == "" { - resourceId = n.Resource.Id() - } - - return nil, fmt.Errorf(preventDestroyErrStr, resourceId) + change := *n.Change + preventDestroy := n.Config.Managed.PreventDestroy + + if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy { + var diags tfdiags.Diagnostics + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Instance cannot be destroyed", + Detail: fmt.Sprintf( + "Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.", + n.Addr.Absolute(ctx.Path()).String(), + ), + Subject: &n.Config.DeclRange, + }) + return nil, diags.Err() } return nil, nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go index 86481dedb4..08f3059e26 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go @@ -1,9 +1,16 @@ package terraform import ( - "sync" - - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" ) // EvalContext is the interface that is given to eval nodes to execute. @@ -13,7 +20,7 @@ type EvalContext interface { Stopped() <-chan struct{} // Path is the current module path. - Path() []string + Path() addrs.ModuleInstance // Hook is used to call hook methods. The callback is called for each // hook and should return the hook action to take and the error. @@ -22,68 +29,105 @@ type EvalContext interface { // Input is the UIInput object for interacting with the UI. 
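The prevent_destroy guard above fires for plain deletes and for both orderings of a replacement. The action test in isolation (planAction and isReplace are stand-ins for plans.Action and its IsReplace method):

package main

import "fmt"

type planAction string

const (
	actCreate           planAction = "create"
	actDelete           planAction = "delete"
	actDeleteThenCreate planAction = "delete-then-create"
	actCreateThenDelete planAction = "create-then-delete"
)

// isReplace stands in for plans.Action.IsReplace: both orderings of a
// replacement destroy the existing object at some point.
func isReplace(a planAction) bool {
	return a == actDeleteThenCreate || a == actCreateThenDelete
}

// violatesPreventDestroy mirrors the guard above.
func violatesPreventDestroy(a planAction, preventDestroy bool) bool {
	return (a == actDelete || isReplace(a)) && preventDestroy
}

func main() {
	fmt.Println(violatesPreventDestroy(actDeleteThenCreate, true)) // true
	fmt.Println(violatesPreventDestroy(actCreate, true))           // false
}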
Input() UIInput - // InitProvider initializes the provider with the given type and name, and + // InitProvider initializes the provider with the given type and address, and // returns the implementation of the resource provider or an error. // // It is an error to initialize the same provider more than once. - InitProvider(typ string, name string) (ResourceProvider, error) + InitProvider(typ string, addr addrs.ProviderConfig) (providers.Interface, error) - // Provider gets the provider instance with the given name (already + // Provider gets the provider instance with the given address (already // initialized) or returns nil if the provider isn't initialized. - Provider(string) ResourceProvider + // + // This method expects an _absolute_ provider configuration address, since + // resources in one module are able to use providers from other modules. + // InitProvider must've been called on the EvalContext of the module + // that owns the given provider before calling this method. + Provider(addrs.AbsProviderConfig) providers.Interface + + // ProviderSchema retrieves the schema for a particular provider, which + // must have already been initialized with InitProvider. + // + // This method expects an _absolute_ provider configuration address, since + // resources in one module are able to use providers from other modules. + ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema // CloseProvider closes provider connections that aren't needed anymore. - CloseProvider(string) error + CloseProvider(addrs.ProviderConfig) error // ConfigureProvider configures the provider with the given // configuration. This is a separate context call because this call // is used to store the provider configuration for inheritance lookups // with ParentProviderConfig(). - ConfigureProvider(string, *ResourceConfig) error + ConfigureProvider(addrs.ProviderConfig, cty.Value) tfdiags.Diagnostics // ProviderInput and SetProviderInput are used to configure providers // from user input. - ProviderInput(string) map[string]interface{} - SetProviderInput(string, map[string]interface{}) + ProviderInput(addrs.ProviderConfig) map[string]cty.Value + SetProviderInput(addrs.ProviderConfig, map[string]cty.Value) // InitProvisioner initializes the provisioner with the given name and // returns the implementation of the resource provisioner or an error. // // It is an error to initialize the same provisioner more than once. - InitProvisioner(string) (ResourceProvisioner, error) + InitProvisioner(string) (provisioners.Interface, error) // Provisioner gets the provisioner instance with the given name (already // initialized) or returns nil if the provisioner isn't initialized. - Provisioner(string) ResourceProvisioner + Provisioner(string) provisioners.Interface + + // ProvisionerSchema retrieves the main configuration schema for a + // particular provisioner, which must have already been initialized with + // InitProvisioner. + ProvisionerSchema(string) *configschema.Block // CloseProvisioner closes provisioner connections that aren't needed // anymore. CloseProvisioner(string) error - // Interpolate takes the given raw configuration and completes - // the interpolations, returning the processed ResourceConfig. + // EvaluateBlock takes the given raw configuration block and associated + // schema and evaluates it to produce a value of an object type that + // conforms to the implied type of the schema. + // + // The "self" argument is optional. 
If given, it is the referenceable + // address that the name "self" should behave as an alias for when + // evaluating. Set this to nil if the "self" object should not be available. + // + // The "key" argument is also optional. If given, it is the instance key + // of the current object within the multi-instance container it belongs + // to. For example, on a resource block with "count" set this should be + // set to a different addrs.IntKey for each instance created from that + // block. Set this to addrs.NoKey if not appropriate. + // + // The returned body is an expanded version of the given body, with any + // "dynamic" blocks replaced with zero or more static blocks. This can be + // used to extract correct source location information about attributes of + // the returned object value. + EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) + + // EvaluateExpr takes the given HCL expression and evaluates it to produce + // a value. + // + // The "self" argument is optional. If given, it is the referenceable + // address that the name "self" should behave as an alias for when + // evaluating. Set this to nil if the "self" object should not be available. + EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) + + // EvaluationScope returns a scope that can be used to evaluate reference + // addresses in this context. + EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope + + // SetModuleCallArguments defines values for the variables of a particular + // child module call. // - // The resource argument is optional. If given, it is the resource - // that is currently being acted upon. - Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error) - - // InterpolateProvider takes a ProviderConfig and interpolates it with the - // stored interpolation scope. Since provider configurations can be - // inherited, the interpolation scope may be different from the current - // context path. Interplation is otherwise executed the same as in the - // Interpolation method. - InterpolateProvider(*config.ProviderConfig, *Resource) (*ResourceConfig, error) - - // SetVariables sets the variables for the module within - // this context with the name n. This function call is additive: - // the second parameter is merged with any previous call. - SetVariables(string, map[string]interface{}) - - // Diff returns the global diff as well as the lock that should - // be used to modify that diff. - Diff() (*Diff, *sync.RWMutex) - - // State returns the global state as well as the lock that should - // be used to modify that state. - State() (*State, *sync.RWMutex) + // Calling this function multiple times has merging behavior, keeping any + // previously-set keys that are not present in the new map. + SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value) + + // Changes returns the writer object that can be used to write new proposed + // changes into the global changes set. + Changes() *plans.ChangesSync + + // State returns a wrapper object that provides safe concurrent access to + // the global state. 
+ State() *states.SyncState } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go index 1b6ee5a625..20b37933d0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go @@ -6,7 +6,20 @@ import ( "log" "sync" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/version" + + "github.com/hashicorp/terraform/states" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" ) // BuiltinEvalContext is an EvalContext implementation that is used by @@ -16,35 +29,47 @@ type BuiltinEvalContext struct { StopContext context.Context // PathValue is the Path that this context is operating within. - PathValue []string + PathValue addrs.ModuleInstance - // Interpolater setting below affect the interpolation of variables. + // Evaluator is used for evaluating expressions within the scope of this + // eval context. + Evaluator *Evaluator + + // Schemas is a repository of all of the schemas we should need to + // decode configuration blocks and expressions. This must be constructed by + // the caller to include schemas for all of the providers, resource types, + // data sources and provisioners used by the given configuration and + // state. // - // The InterpolaterVars are the exact value for ${var.foo} values. - // The map is shared between all contexts and is a mapping of - // PATH to KEY to VALUE. Because it is shared by all contexts as well - // as the Interpolater itself, it is protected by InterpolaterVarLock - // which must be locked during any access to the map. - Interpolater *Interpolater - InterpolaterVars map[string]map[string]interface{} - InterpolaterVarLock *sync.Mutex + // This must not be mutated during evaluation. + Schemas *Schemas + + // VariableValues contains the variable values across all modules. This + // structure is shared across the entire containing context, and so it + // may be accessed only when holding VariableValuesLock. + // The keys of the first level of VariableValues are the string + // representations of addrs.ModuleInstance values. The second-level keys + // are variable names within each module instance. + VariableValues map[string]map[string]cty.Value + VariableValuesLock *sync.Mutex Components contextComponentFactory Hooks []Hook InputValue UIInput - ProviderCache map[string]ResourceProvider - ProviderInputConfig map[string]map[string]interface{} + ProviderCache map[string]providers.Interface + ProviderInputConfig map[string]map[string]cty.Value ProviderLock *sync.Mutex - ProvisionerCache map[string]ResourceProvisioner + ProvisionerCache map[string]provisioners.Interface ProvisionerLock *sync.Mutex - DiffValue *Diff - DiffLock *sync.RWMutex - StateValue *State - StateLock *sync.RWMutex + ChangesValue *plans.ChangesSync + StateValue *states.SyncState once sync.Once } +// BuiltinEvalContext implements EvalContext +var _ EvalContext = (*BuiltinEvalContext)(nil) + func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { // This can happen during tests. During tests, we just block forever. 
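VariableValues above is keyed first by the module instance's string representation and then by variable name, and SetModuleCallArguments merges rather than replaces on repeated calls. A sketch of that merge behavior with plain string maps (setModuleArgs is illustrative, not the SDK code):

package main

import "fmt"

// setModuleArgs illustrates the merge-on-set behavior described above:
// later calls add or overwrite keys but never drop previously-set ones.
func setModuleArgs(all map[string]map[string]string, moduleKey string, vals map[string]string) {
	args := all[moduleKey]
	if args == nil {
		args = make(map[string]string)
		all[moduleKey] = args
	}
	for k, v := range vals {
		args[k] = v
	}
}

func main() {
	all := map[string]map[string]string{}
	setModuleArgs(all, `module.child["a"]`, map[string]string{"region": "us-east-1"})
	setModuleArgs(all, `module.child["a"]`, map[string]string{"size": "large"})
	fmt.Println(all) // both region and size retained for module.child["a"]
}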
if ctx.StopContext == nil { @@ -78,12 +103,13 @@ func (ctx *BuiltinEvalContext) Input() UIInput { return ctx.InputValue } -func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProvider, error) { +func (ctx *BuiltinEvalContext) InitProvider(typeName string, addr addrs.ProviderConfig) (providers.Interface, error) { ctx.once.Do(ctx.init) + absAddr := addr.Absolute(ctx.Path()) // If we already initialized, it is an error - if p := ctx.Provider(name); p != nil { - return nil, fmt.Errorf("Provider '%s' already initialized", name) + if p := ctx.Provider(absAddr); p != nil { + return nil, fmt.Errorf("%s is already initialized", addr) } // Warning: make sure to acquire these locks AFTER the call to Provider @@ -91,85 +117,102 @@ func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProv ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - p, err := ctx.Components.ResourceProvider(typeName, name) + key := absAddr.String() + + p, err := ctx.Components.ResourceProvider(typeName, key) if err != nil { return nil, err } - ctx.ProviderCache[name] = p + log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", typeName, absAddr) + ctx.ProviderCache[key] = p + return p, nil } -func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider { +func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { ctx.once.Do(ctx.init) ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - return ctx.ProviderCache[n] + return ctx.ProviderCache[addr.String()] +} + +func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { + ctx.once.Do(ctx.init) + + return ctx.Schemas.ProviderSchema(addr.ProviderConfig.Type) } -func (ctx *BuiltinEvalContext) CloseProvider(n string) error { +func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.ProviderConfig) error { ctx.once.Do(ctx.init) ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - var provider interface{} - provider = ctx.ProviderCache[n] + key := addr.Absolute(ctx.Path()).String() + provider := ctx.ProviderCache[key] if provider != nil { - if p, ok := provider.(ResourceProviderCloser); ok { - delete(ctx.ProviderCache, n) - return p.Close() - } + delete(ctx.ProviderCache, key) + return provider.Close() } return nil } -func (ctx *BuiltinEvalContext) ConfigureProvider( - n string, cfg *ResourceConfig) error { - p := ctx.Provider(n) +func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + absAddr := addr.Absolute(ctx.Path()) + p := ctx.Provider(absAddr) if p == nil { - return fmt.Errorf("Provider '%s' not initialized", n) + diags = diags.Append(fmt.Errorf("%s not initialized", addr)) + return diags } - return p.Configure(cfg) + + providerSchema := ctx.ProviderSchema(absAddr) + if providerSchema == nil { + diags = diags.Append(fmt.Errorf("schema for %s is not available", absAddr)) + return diags + } + + req := providers.ConfigureRequest{ + TerraformVersion: version.String(), + Config: cfg, + } + + resp := p.Configure(req) + return resp.Diagnostics } -func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} { +func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.ProviderConfig) map[string]cty.Value { ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - // Make a copy of the path so we can safely edit it - path := ctx.Path() - pathCopy := make([]string, len(path)+1) - copy(pathCopy, path) - - // Go up the tree. 
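The cache rework above keys providers by the string rendering of their absolute configuration address rather than by a munged path slice. A minimal concurrent cache in that style (provider and providerCache are stand-ins, not the SDK types):

package main

import (
	"fmt"
	"sync"
)

// provider is a stand-in for providers.Interface.
type provider interface{ Close() error }

type nullProvider struct{}

func (nullProvider) Close() error { return nil }

// providerCache keys initialized providers by the string form of their
// absolute config address, e.g. "module.a.provider.aws.east".
type providerCache struct {
	mu    sync.Mutex
	cache map[string]provider
}

func (c *providerCache) init(key string) (provider, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if _, ok := c.cache[key]; ok {
		return nil, fmt.Errorf("%s is already initialized", key)
	}
	p := nullProvider{}
	c.cache[key] = p
	return p, nil
}

func (c *providerCache) close(key string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if p, ok := c.cache[key]; ok {
		delete(c.cache, key) // drop first so a failed Close isn't retried
		return p.Close()
	}
	return nil
}

func main() {
	c := &providerCache{cache: map[string]provider{}}
	c.init("provider.aws")
	fmt.Println(c.close("provider.aws")) // <nil>
}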
- for i := len(path) - 1; i >= 0; i-- { - pathCopy[i+1] = n - k := PathCacheKey(pathCopy[:i+2]) - if v, ok := ctx.ProviderInputConfig[k]; ok { - return v - } + if !ctx.Path().IsRoot() { + // Only root module provider configurations can have input. + return nil } - return nil + return ctx.ProviderInputConfig[pc.String()] } -func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) { - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n +func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.ProviderConfig, c map[string]cty.Value) { + absProvider := pc.Absolute(ctx.Path()) + + if !ctx.Path().IsRoot() { + // Only root module provider configurations can have input. + log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module") + return + } // Save the configuration ctx.ProviderLock.Lock() - ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c + ctx.ProviderInputConfig[absProvider.String()] = c ctx.ProviderLock.Unlock() } -func (ctx *BuiltinEvalContext) InitProvisioner( - n string) (ResourceProvisioner, error) { +func (ctx *BuiltinEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { ctx.once.Do(ctx.init) // If we already initialized, it is an error @@ -182,10 +225,7 @@ func (ctx *BuiltinEvalContext) InitProvisioner( ctx.ProvisionerLock.Lock() defer ctx.ProvisionerLock.Unlock() - provPath := make([]string, len(ctx.Path())+1) - copy(provPath, ctx.Path()) - provPath[len(provPath)-1] = n - key := PathCacheKey(provPath) + key := PathObjectCacheKey(ctx.Path(), n) p, err := ctx.Components.ResourceProvisioner(n, key) if err != nil { @@ -193,20 +233,24 @@ func (ctx *BuiltinEvalContext) InitProvisioner( } ctx.ProvisionerCache[key] = p + return p, nil } -func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner { +func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface { ctx.once.Do(ctx.init) ctx.ProvisionerLock.Lock() defer ctx.ProvisionerLock.Unlock() - provPath := make([]string, len(ctx.Path())+1) - copy(provPath, ctx.Path()) - provPath[len(provPath)-1] = n + key := PathObjectCacheKey(ctx.Path(), n) + return ctx.ProvisionerCache[key] +} + +func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block { + ctx.once.Do(ctx.init) - return ctx.ProvisionerCache[PathCacheKey(provPath)] + return ctx.Schemas.ProvisionerConfig(n) } func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { @@ -215,106 +259,70 @@ func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { ctx.ProvisionerLock.Lock() defer ctx.ProvisionerLock.Unlock() - provPath := make([]string, len(ctx.Path())+1) - copy(provPath, ctx.Path()) - provPath[len(provPath)-1] = n + key := PathObjectCacheKey(ctx.Path(), n) - var prov interface{} - prov = ctx.ProvisionerCache[PathCacheKey(provPath)] + prov := ctx.ProvisionerCache[key] if prov != nil { - if p, ok := prov.(ResourceProvisionerCloser); ok { - delete(ctx.ProvisionerCache, PathCacheKey(provPath)) - return p.Close() - } + return prov.Close() } return nil } -func (ctx *BuiltinEvalContext) Interpolate( - cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) { - - if cfg != nil { - scope := &InterpolationScope{ - Path: ctx.Path(), - Resource: r, - } - - vs, err := ctx.Interpolater.Values(scope, cfg.Variables) - if err != nil { - return nil, err - } - - // Do the interpolation - if err := cfg.Interpolate(vs); err != nil { - return nil, err - } - } - - result := 
NewResourceConfig(cfg) - result.interpolateForce() - return result, nil +func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + scope := ctx.EvaluationScope(self, keyData) + body, evalDiags := scope.ExpandBlock(body, schema) + diags = diags.Append(evalDiags) + val, evalDiags := scope.EvalBlock(body, schema) + diags = diags.Append(evalDiags) + return val, body, diags } -func (ctx *BuiltinEvalContext) InterpolateProvider( - pc *config.ProviderConfig, r *Resource) (*ResourceConfig, error) { - - var cfg *config.RawConfig - - if pc != nil && pc.RawConfig != nil { - scope := &InterpolationScope{ - Path: ctx.Path(), - Resource: r, - } - - cfg = pc.RawConfig - - vs, err := ctx.Interpolater.Values(scope, cfg.Variables) - if err != nil { - return nil, err - } +func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey) + return scope.EvalExpr(expr, wantType) +} - // Do the interpolation - if err := cfg.Interpolate(vs); err != nil { - return nil, err - } +func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { + data := &evaluationStateData{ + Evaluator: ctx.Evaluator, + ModulePath: ctx.PathValue, + InstanceKeyData: keyData, + Operation: ctx.Evaluator.Operation, } - - result := NewResourceConfig(cfg) - result.interpolateForce() - return result, nil + return ctx.Evaluator.Scope(data, self) } -func (ctx *BuiltinEvalContext) Path() []string { +func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { return ctx.PathValue } -func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) { - ctx.InterpolaterVarLock.Lock() - defer ctx.InterpolaterVarLock.Unlock() +func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) { + ctx.VariableValuesLock.Lock() + defer ctx.VariableValuesLock.Unlock() - path := make([]string, len(ctx.Path())+1) - copy(path, ctx.Path()) - path[len(path)-1] = n - key := PathCacheKey(path) + childPath := n.ModuleInstance(ctx.PathValue) + key := childPath.String() - vars := ctx.InterpolaterVars[key] - if vars == nil { - vars = make(map[string]interface{}) - ctx.InterpolaterVars[key] = vars + args := ctx.VariableValues[key] + if args == nil { + args = make(map[string]cty.Value) + ctx.VariableValues[key] = vals + return } - for k, v := range vs { - vars[k] = v + for k, v := range vals { + args[k] = v } } -func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) { - return ctx.DiffValue, ctx.DiffLock +func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { + return ctx.ChangesValue } -func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) { - return ctx.StateValue, ctx.StateLock +func (ctx *BuiltinEvalContext) State() *states.SyncState { + return ctx.StateValue } func (ctx *BuiltinEvalContext) init() { diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go index 646451795d..195ecc5c2f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go @@ -1,9 +1,20 @@ package terraform import ( - "sync" + "github.com/hashicorp/hcl2/hcl" + 
"github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // MockEvalContext is a mock version of EvalContext that can be used @@ -20,43 +31,84 @@ type MockEvalContext struct { InputInput UIInput InitProviderCalled bool - InitProviderName string - InitProviderProvider ResourceProvider + InitProviderType string + InitProviderAddr addrs.ProviderConfig + InitProviderProvider providers.Interface InitProviderError error ProviderCalled bool - ProviderName string - ProviderProvider ResourceProvider + ProviderAddr addrs.AbsProviderConfig + ProviderProvider providers.Interface + + ProviderSchemaCalled bool + ProviderSchemaAddr addrs.AbsProviderConfig + ProviderSchemaSchema *ProviderSchema CloseProviderCalled bool - CloseProviderName string - CloseProviderProvider ResourceProvider + CloseProviderAddr addrs.ProviderConfig + CloseProviderProvider providers.Interface ProviderInputCalled bool - ProviderInputName string - ProviderInputConfig map[string]interface{} + ProviderInputAddr addrs.ProviderConfig + ProviderInputValues map[string]cty.Value SetProviderInputCalled bool - SetProviderInputName string - SetProviderInputConfig map[string]interface{} + SetProviderInputAddr addrs.ProviderConfig + SetProviderInputValues map[string]cty.Value ConfigureProviderCalled bool - ConfigureProviderName string - ConfigureProviderConfig *ResourceConfig - ConfigureProviderError error + ConfigureProviderAddr addrs.ProviderConfig + ConfigureProviderConfig cty.Value + ConfigureProviderDiags tfdiags.Diagnostics InitProvisionerCalled bool InitProvisionerName string - InitProvisionerProvisioner ResourceProvisioner + InitProvisionerProvisioner provisioners.Interface InitProvisionerError error ProvisionerCalled bool ProvisionerName string - ProvisionerProvisioner ResourceProvisioner + ProvisionerProvisioner provisioners.Interface + + ProvisionerSchemaCalled bool + ProvisionerSchemaName string + ProvisionerSchemaSchema *configschema.Block CloseProvisionerCalled bool CloseProvisionerName string - CloseProvisionerProvisioner ResourceProvisioner + CloseProvisionerProvisioner provisioners.Interface + + EvaluateBlockCalled bool + EvaluateBlockBody hcl.Body + EvaluateBlockSchema *configschema.Block + EvaluateBlockSelf addrs.Referenceable + EvaluateBlockKeyData InstanceKeyEvalData + EvaluateBlockResultFunc func( + body hcl.Body, + schema *configschema.Block, + self addrs.Referenceable, + keyData InstanceKeyEvalData, + ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set + EvaluateBlockResult cty.Value + EvaluateBlockExpandedBody hcl.Body + EvaluateBlockDiags tfdiags.Diagnostics + + EvaluateExprCalled bool + EvaluateExprExpr hcl.Expression + EvaluateExprWantType cty.Type + EvaluateExprSelf addrs.Referenceable + EvaluateExprResultFunc func( + expr hcl.Expression, + wantType cty.Type, + self addrs.Referenceable, + ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set + EvaluateExprResult cty.Value + EvaluateExprDiags tfdiags.Diagnostics + + EvaluationScopeCalled bool + EvaluationScopeSelf addrs.Referenceable + EvaluationScopeKeyData 
InstanceKeyEvalData + EvaluationScopeScope *lang.Scope InterpolateCalled bool InterpolateConfig *config.RawConfig @@ -71,21 +123,22 @@ type MockEvalContext struct { InterpolateProviderError error PathCalled bool - PathPath []string + PathPath addrs.ModuleInstance - SetVariablesCalled bool - SetVariablesModule string - SetVariablesVariables map[string]interface{} + SetModuleCallArgumentsCalled bool + SetModuleCallArgumentsModule addrs.ModuleCallInstance + SetModuleCallArgumentsValues map[string]cty.Value - DiffCalled bool - DiffDiff *Diff - DiffLock *sync.RWMutex + ChangesCalled bool + ChangesChanges *plans.ChangesSync StateCalled bool - StateState *State - StateLock *sync.RWMutex + StateState *states.SyncState } +// MockEvalContext implements EvalContext +var _ EvalContext = (*MockEvalContext)(nil) + func (c *MockEvalContext) Stopped() <-chan struct{} { c.StoppedCalled = true return c.StoppedValue @@ -107,61 +160,157 @@ func (c *MockEvalContext) Input() UIInput { return c.InputInput } -func (c *MockEvalContext) InitProvider(t, n string) (ResourceProvider, error) { +func (c *MockEvalContext) InitProvider(t string, addr addrs.ProviderConfig) (providers.Interface, error) { c.InitProviderCalled = true - c.InitProviderName = n + c.InitProviderType = t + c.InitProviderAddr = addr return c.InitProviderProvider, c.InitProviderError } -func (c *MockEvalContext) Provider(n string) ResourceProvider { +func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { c.ProviderCalled = true - c.ProviderName = n + c.ProviderAddr = addr return c.ProviderProvider } -func (c *MockEvalContext) CloseProvider(n string) error { +func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { + c.ProviderSchemaCalled = true + c.ProviderSchemaAddr = addr + return c.ProviderSchemaSchema +} + +func (c *MockEvalContext) CloseProvider(addr addrs.ProviderConfig) error { c.CloseProviderCalled = true - c.CloseProviderName = n + c.CloseProviderAddr = addr return nil } -func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error { +func (c *MockEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { c.ConfigureProviderCalled = true - c.ConfigureProviderName = n + c.ConfigureProviderAddr = addr c.ConfigureProviderConfig = cfg - return c.ConfigureProviderError + return c.ConfigureProviderDiags } -func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} { +func (c *MockEvalContext) ProviderInput(addr addrs.ProviderConfig) map[string]cty.Value { c.ProviderInputCalled = true - c.ProviderInputName = n - return c.ProviderInputConfig + c.ProviderInputAddr = addr + return c.ProviderInputValues } -func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) { +func (c *MockEvalContext) SetProviderInput(addr addrs.ProviderConfig, vals map[string]cty.Value) { c.SetProviderInputCalled = true - c.SetProviderInputName = n - c.SetProviderInputConfig = cfg + c.SetProviderInputAddr = addr + c.SetProviderInputValues = vals } -func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) { +func (c *MockEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { c.InitProvisionerCalled = true c.InitProvisionerName = n return c.InitProvisionerProvisioner, c.InitProvisionerError } -func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner { +func (c *MockEvalContext) Provisioner(n string) provisioners.Interface { c.ProvisionerCalled = true c.ProvisionerName = n 
return c.ProvisionerProvisioner } +func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block { + c.ProvisionerSchemaCalled = true + c.ProvisionerSchemaName = n + return c.ProvisionerSchemaSchema +} + func (c *MockEvalContext) CloseProvisioner(n string) error { c.CloseProvisionerCalled = true c.CloseProvisionerName = n return nil } +func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + c.EvaluateBlockCalled = true + c.EvaluateBlockBody = body + c.EvaluateBlockSchema = schema + c.EvaluateBlockSelf = self + c.EvaluateBlockKeyData = keyData + if c.EvaluateBlockResultFunc != nil { + return c.EvaluateBlockResultFunc(body, schema, self, keyData) + } + return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags +} + +func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + c.EvaluateExprCalled = true + c.EvaluateExprExpr = expr + c.EvaluateExprWantType = wantType + c.EvaluateExprSelf = self + if c.EvaluateExprResultFunc != nil { + return c.EvaluateExprResultFunc(expr, wantType, self) + } + return c.EvaluateExprResult, c.EvaluateExprDiags +} + +// installSimpleEval is a helper to install a simple mock implementation of +// both EvaluateBlock and EvaluateExpr into the receiver. +// +// These default implementations will either evaluate the given input against +// the scope in field EvaluationScopeScope or, if it is nil, with no eval +// context at all so that only constant values may be used. +// +// This function overwrites any existing functions installed in fields +// EvaluateBlockResultFunc and EvaluateExprResultFunc. +func (c *MockEvalContext) installSimpleEval() { + c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + if scope := c.EvaluationScopeScope; scope != nil { + // Fully-functional codepath. + var diags tfdiags.Diagnostics + body, diags = scope.ExpandBlock(body, schema) + if diags.HasErrors() { + return cty.DynamicVal, body, diags + } + val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return cty.DynamicVal, body, diags + } + return val, body, diags + } + + // Fallback codepath supporting constant values only. + val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil) + return val, body, tfdiags.Diagnostics(nil).Append(hclDiags) + } + c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + if scope := c.EvaluationScopeScope; scope != nil { + // Fully-functional codepath. + return scope.EvalExpr(expr, wantType) + } + + // Fallback codepath supporting constant values only. 
+ var diags tfdiags.Diagnostics + val, hclDiags := expr.Value(nil) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return cty.DynamicVal, diags + } + var err error + val, err = convert.Convert(val, wantType) + if err != nil { + diags = diags.Append(err) + return cty.DynamicVal, diags + } + return val, diags + } +} + +func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { + c.EvaluationScopeCalled = true + c.EvaluationScopeSelf = self + c.EvaluationScopeKeyData = keyData + return c.EvaluationScopeScope +} + func (c *MockEvalContext) Interpolate( config *config.RawConfig, resource *Resource) (*ResourceConfig, error) { c.InterpolateCalled = true @@ -178,23 +327,23 @@ func (c *MockEvalContext) InterpolateProvider( return c.InterpolateProviderConfigResult, c.InterpolateError } -func (c *MockEvalContext) Path() []string { +func (c *MockEvalContext) Path() addrs.ModuleInstance { c.PathCalled = true return c.PathPath } -func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) { - c.SetVariablesCalled = true - c.SetVariablesModule = n - c.SetVariablesVariables = vs +func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) { + c.SetModuleCallArgumentsCalled = true + c.SetModuleCallArgumentsModule = n + c.SetModuleCallArgumentsValues = values } -func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) { - c.DiffCalled = true - return c.DiffDiff, c.DiffLock +func (c *MockEvalContext) Changes() *plans.ChangesSync { + c.ChangesCalled = true + return c.ChangesChanges } -func (c *MockEvalContext) State() (*State, *sync.RWMutex) { +func (c *MockEvalContext) State() *states.SyncState { c.StateCalled = true - return c.StateState, c.StateLock + return c.StateState } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go index 2ae56a751c..9c15d7de18 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go @@ -1,58 +1,121 @@ package terraform import ( - "github.com/hashicorp/terraform/config" -) - -// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state -// when there is a resource count with zero/one boundary, i.e. fixing -// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. -type EvalCountFixZeroOneBoundary struct { - Resource *config.Resource -} + "fmt" + "log" -// TODO: test -func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) { - // Get the count, important for knowing whether we're supposed to - // be adding the zero, or trimming it. - count, err := n.Resource.Count() - if err != nil { - return nil, err - } + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) - // Figure what to look for and what to replace it with - hunt := n.Resource.Id() - replace := hunt + ".0" - if count < 2 { - hunt, replace = replace, hunt +// evaluateResourceCountExpression is our standard mechanism for interpreting an +// expression given for a "count" argument on a resource. This should be called +// from the DynamicExpand of a node representing a resource in order to +// determine the final count value. 
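For context on the mock's constant-only fallback just above: with no *hcl.EvalContext, an expression can only yield literal values, and convert.Convert then coerces the result to the wanted type. A minimal standalone sketch of that same pattern, using only the public hcl2 and go-cty packages (the expression "3" and the target type are illustrative stand-ins):

package main

import (
    "fmt"

    "github.com/hashicorp/hcl2/hcl"
    "github.com/hashicorp/hcl2/hcl/hclsyntax"
    "github.com/zclconf/go-cty/cty"
    "github.com/zclconf/go-cty/cty/convert"
)

func main() {
    // Parse a constant expression; "3" stands in for whatever a test
    // fixture would supply.
    expr, diags := hclsyntax.ParseExpression([]byte(`"3"`), "test.tf", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }

    // Evaluate with a nil EvalContext, so only constants are allowed,
    // mirroring the fallback codepath above.
    val, diags := expr.Value(nil)
    if diags.HasErrors() {
        panic(diags.Error())
    }

    // Coerce to the wanted type, as the fallback does with wantType.
    got, err := convert.Convert(val, cty.Number)
    if err != nil {
        panic(err)
    }
    fmt.Println(got.AsBigFloat()) // 3
}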
+// +// If the result is zero or positive and no error diagnostics are returned, then +// the result is the literal count value to use. +// +// If the result is -1, this indicates that the given expression is nil and so +// the "count" behavior should not be enabled for this resource at all. +// +// If error diagnostics are returned then the result is always the meaningless +// placeholder value -1, except in one case: if the count expression evaluates +// to an unknown number value then the result is zero, allowing this situation +// to be treated by the caller as special if needed. For example, an early +// graph walk may wish to just silently skip resources with unknown counts +// to allow them to be dealt with in a later graph walk where more information +// is available. +func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) { + if expr == nil { + return -1, nil } - state, lock := ctx.State() + var diags tfdiags.Diagnostics + var count int - // Get a lock so we can access this instance and potentially make - // changes to it. - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, then it doesn't matter. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil + countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) + diags = diags.Append(countDiags) + if diags.HasErrors() { + return -1, diags } - // Look for the resource state. If we don't have one, then it is okay. - rs, ok := mod.Resources[hunt] - if !ok { - return nil, nil + switch { + case countVal.IsNull(): + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is null. An integer is required.`, + Subject: expr.Range().Ptr(), + }) + return -1, diags + case !countVal.IsKnown(): + // Currently this is a rather bad outcome from a UX standpoint, since we have + // no real mechanism to deal with this situation and all we can do is produce + // an error message. + // FIXME: In future, implement a built-in mechanism for deferring changes that + // can't yet be predicted, and use it to guide the user through several + // plan/apply steps until the desired configuration is eventually reached. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the count depends on.`, + Subject: expr.Range().Ptr(), + }) + // We return zero+errors in this one case to allow callers to handle + // an unknown count as special. This is rarely necessary, but is used + // by the validate walk in particular so that it can just skip + // validation in this case, assuming a later walk will take care of it. 
+ return 0, diags } - // If the replacement key exists, we just keep both - if _, ok := mod.Resources[replace]; ok { - return nil, nil + err := gocty.FromCtyValue(countVal, &count) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return -1, diags + } + if count < 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`, + Subject: expr.Range().Ptr(), + }) + return -1, diags } - mod.Resources[replace] = rs - delete(mod.Resources, hunt) + return count, diags +} - return nil, nil +// fixResourceCountSetTransition is a helper function to fix up the state when a +// resource transitions its "count" from being set to unset or vice-versa, +// treating a 0-key and a no-key instance as aliases for one another across +// the transition. +// +// The correct time to call this function is in the DynamicExpand method for +// a node representing a resource, just after evaluating the count with +// evaluateResourceCountExpression, and before any other analysis of the +// state such as orphan detection. +// +// This function calls methods on the given EvalContext to update the current +// state in-place, if necessary. It is a no-op if there is no count transition +// taking place. +// +// Since the state is modified in-place, this function must take a writer lock +// on the state. The caller must therefore not also be holding a state lock, +// or this function will block forever awaiting the lock. +func fixResourceCountSetTransition(ctx EvalContext, addr addrs.AbsResource, countEnabled bool) { + state := ctx.State() + changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled) + if changed { + log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr) + } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go index 91e2b904e9..647c58d1ed 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go @@ -1,7 +1,11 @@ package terraform import ( + "fmt" "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" ) // EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state @@ -9,22 +13,34 @@ import ( // a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. // // This works on the global state. -type EvalCountFixZeroOneBoundaryGlobal struct{} +type EvalCountFixZeroOneBoundaryGlobal struct { + Config *configs.Config +} // TODO: test func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { - // Get the state and lock it since we'll potentially modify it - state, lock := ctx.State() - lock.Lock() - defer lock.Unlock() - - // Prune the state since we require a clean state to work - state.prune() - - // Go through each modules since the boundaries are restricted to a - // module scope. + // We'll temporarily lock the state to grab the modules, then work on each + // one separately while taking a lock again for each separate resource. 
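As a compact illustration of the count classification implemented by evaluateResourceCountExpression above (a null count is an error, an unknown count maps to zero alongside errors so callers can special-case it, and negative numbers are rejected), here is a standalone sketch using the same go-cty calls; the helper name classifyCount is hypothetical:

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
    "github.com/zclconf/go-cty/cty/gocty"
)

// classifyCount is a hypothetical helper mirroring the null/unknown/negative
// handling in evaluateResourceCountExpression.
func classifyCount(v cty.Value) (int, string) {
    switch {
    case v.IsNull():
        return -1, "error: count must not be null"
    case !v.IsKnown():
        return 0, "unknown: defer to a later walk"
    }
    var n int
    if err := gocty.FromCtyValue(v, &n); err != nil {
        return -1, "error: " + err.Error()
    }
    if n < 0 {
        return -1, "error: negative count"
    }
    return n, "ok"
}

func main() {
    for _, v := range []cty.Value{
        cty.NumberIntVal(3),        // ok: 3
        cty.NullVal(cty.Number),    // error
        cty.UnknownVal(cty.Number), // unknown
        cty.NumberIntVal(-1),       // error
    } {
        n, verdict := classifyCount(v)
        fmt.Println(n, verdict)
    }
}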
+ // This means that if another caller concurrently adds a module here while + // we're working then we won't update it, but that's no worse than the + // concurrent writer blocking for our entire fixup process and _then_ + // adding a new module, and in practice the graph node associated with + // this eval depends on everything else in the graph anyway, so there + // should not be concurrent writers. + state := ctx.State().Lock() + moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules)) for _, m := range state.Modules { - if err := n.fixModule(m); err != nil { + moduleAddrs = append(moduleAddrs, m.Addr) + } + ctx.State().Unlock() + + for _, addr := range moduleAddrs { + cfg := n.Config.DescendentForInstance(addr) + if cfg == nil { + log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) + continue + } + if err := n.fixModule(ctx, addr); err != nil { return nil, err } } @@ -32,46 +48,29 @@ func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, return nil, nil } -func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error { - // Counts keeps track of keys and their counts - counts := make(map[string]int) - for k, _ := range m.Resources { - // Parse the key - key, err := ParseResourceStateKey(k) - if err != nil { - return err - } - - // Set the index to -1 so that we can keep count - key.Index = -1 - - // Increment - counts[key.String()]++ +func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error { + ms := ctx.State().Module(moduleAddr) + cfg := n.Config.DescendentForInstance(moduleAddr) + if ms == nil { + // Theoretically possible for a concurrent writer to delete a module + // while we're running, but in practice the graph node that called us + // depends on everything else in the graph and so there can never + // be a concurrent writer. + return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr) + } + if cfg == nil { + return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr) } - // Go through the counts and do the fixup for each resource - for raw, count := range counts { - // Search and replace this resource - search := raw - replace := raw + ".0" - if count < 2 { - search, replace = replace, search - } - log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace) - - // Look for the resource state. If we don't have one, then it is okay. 
- rs, ok := m.Resources[search] - if !ok { - continue - } - - // If the replacement key exists, we just keep both - if _, ok := m.Resources[replace]; ok { + for _, r := range ms.Resources { + addr := r.Addr.Absolute(moduleAddr) + rCfg := cfg.Module.ResourceByAddr(r.Addr) + if rCfg == nil { + log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) continue } - - m.Resources[replace] = rs - delete(m.Resources, search) + hasCount := rCfg.Count != nil + fixResourceCountSetTransition(ctx, addr, hasCount) } return nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go index 26205ce51e..461a0b508b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go @@ -1,92 +1,114 @@ package terraform import ( + "bytes" "fmt" "log" + "reflect" "strings" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/version" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/objchange" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) -// EvalCompareDiff is an EvalNode implementation that compares two diffs -// and errors if the diffs are not equal. -type EvalCompareDiff struct { - Info *InstanceInfo - One, Two **InstanceDiff +// EvalCheckPlannedChange is an EvalNode implementation that produces errors +// if the _actual_ expected value is not compatible with what was recorded +// in the plan. +// +// Errors here are most often indicative of a bug in the provider, so our +// error messages will report with that in mind. It's also possible that +// there's a bug in Terraform's Core's own "proposed new value" code in +// EvalDiff. +type EvalCheckPlannedChange struct { + Addr addrs.ResourceInstance + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + + // We take ResourceInstanceChange objects here just because that's what's + // convenient to pass in from the evaltree implementation, but we really + // only look at the "After" value of each change. + Planned, Actual **plans.ResourceInstanceChange } -// TODO: test -func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) { - one, two := *n.One, *n.Two - - // If either are nil, let them be empty - if one == nil { - one = new(InstanceDiff) - one.init() - } - if two == nil { - two = new(InstanceDiff) - two.init() - } - oneId, _ := one.GetAttribute("id") - twoId, _ := two.GetAttribute("id") - one.DelAttribute("id") - two.DelAttribute("id") - defer func() { - if oneId != nil { - one.SetAttribute("id", oneId) - } - if twoId != nil { - two.SetAttribute("id", twoId) - } - }() - - if same, reason := one.Same(two); !same { - log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id) - log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason) - log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one) - log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two) - return nil, fmt.Errorf( - "%s: diffs didn't match during apply. 
This is a bug with "+ - "Terraform and should be reported as a GitHub Issue.\n"+ - "\n"+ - "Please include the following information in your report:\n"+ - "\n"+ - " Terraform Version: %s\n"+ - " Resource ID: %s\n"+ - " Mismatch reason: %s\n"+ - " Diff One (usually from plan): %#v\n"+ - " Diff Two (usually from apply): %#v\n"+ - "\n"+ - "Also include as much context as you can about your config, state, "+ - "and the steps you performed to trigger this error.\n", - n.Info.Id, version.Version, n.Info.Id, reason, one, two) +func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) { + providerSchema := *n.ProviderSchema + plannedChange := *n.Planned + actualChange := *n.Actual + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type) + } + + var diags tfdiags.Diagnostics + absAddr := n.Addr.Absolute(ctx.Path()) + + log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) + + if plannedChange.Action != actualChange.Action { + switch { + case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: + // It's okay for an update to become a NoOp once we've filled in + // all of the unknown values, since the final values might actually + // match what was there before after all. + log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, + plannedChange.Action, actualChange.Action, + ), + )) + } } - return nil, nil + errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), + ), + )) + } + return nil, diags.Err() } -// EvalDiff is an EvalNode implementation that does a refresh for -// a resource. +// EvalDiff is an EvalNode implementation that detects changes for a given +// resource instance. type EvalDiff struct { - Name string - Info *InstanceInfo - Config **ResourceConfig - Provider *ResourceProvider - Diff **InstanceDiff - State **InstanceState - OutputDiff **InstanceDiff - OutputState **InstanceState - - // Resource is needed to fetch the ignore_changes list so we can - // filter user-requested ignored attributes from the diff. - Resource *config.Resource - - // Stub is used to flag the generated InstanceDiff as a stub. 
This is used to - // ensure that the node exists to perform interpolations and generate - // computed paths off of, but not as an actual diff where resouces should be - // counted, and not as a diff that should be acted on. + Addr addrs.ResourceInstance + Config *configs.Resource + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + State **states.ResourceInstanceObject + PreviousDiff **plans.ResourceInstanceChange + + // CreateBeforeDestroy is set if either the resource's own config sets + // create_before_destroy explicitly or if dependencies have forced the + // resource to be handled as create_before_destroy in order to avoid + // a dependency cycle. + CreateBeforeDestroy bool + + OutputChange **plans.ResourceInstanceChange + OutputValue *cty.Value + OutputState **states.ResourceInstanceObject + Stub bool } @@ -95,81 +117,303 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { state := *n.State config := *n.Config provider := *n.Provider + providerSchema := *n.ProviderSchema + + if providerSchema == nil { + return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr) + } + if n.ProviderAddr.ProviderConfig.Type == "" { + panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path()))) + } + + var diags tfdiags.Diagnostics + + // Evaluate the configuration + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + keyData := EvalDataForInstanceKey(n.Addr.Key) + configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + absAddr := n.Addr.Absolute(ctx.Path()) + var priorVal cty.Value + var priorValTainted cty.Value + var priorPrivate []byte + if state != nil { + if state.Status != states.ObjectTainted { + priorVal = state.Value + priorPrivate = state.Private + } else { + // If the prior state is tainted then we'll proceed below like + // we're creating an entirely new object, but then turn it into + // a synthetic "Replace" change at the end, creating the same + // result as if the provider had marked at least one argument + // change as "requires replacement". + priorValTainted = state.Value + priorVal = cty.NullVal(schema.ImpliedType()) + } + } else { + priorVal = cty.NullVal(schema.ImpliedType()) + } + + proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal) // Call pre-diff hook if !n.Stub { err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.Info, state) + return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) }) if err != nil { return nil, err } } - // The state for the diff must never be nil - diffState := state - if diffState == nil { - diffState = new(InstanceState) + // The provider gets an opportunity to customize the proposed new value, + // which in turn produces the _planned_ new value. 
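Before the provider customizes anything, the proposed new value computed by objchange.ProposedNewObject above is a mechanical merge: configured attributes are taken from config, while computed attributes that are unset in config keep their prior values. A rough standalone sketch under that assumption, with an invented two-attribute schema:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/configs/configschema"
    "github.com/hashicorp/terraform/plans/objchange"
    "github.com/zclconf/go-cty/cty"
)

func main() {
    // An illustrative schema: "ami" is configurable, "id" is computed.
    schema := &configschema.Block{
        Attributes: map[string]*configschema.Attribute{
            "ami": {Type: cty.String, Optional: true},
            "id":  {Type: cty.String, Computed: true},
        },
    }
    prior := cty.ObjectVal(map[string]cty.Value{
        "ami": cty.StringVal("ami-1234"),
        "id":  cty.StringVal("i-abc123"),
    })
    config := cty.ObjectVal(map[string]cty.Value{
        "ami": cty.StringVal("ami-5678"), // changed in configuration
        "id":  cty.NullVal(cty.String),   // computed, so never set in config
    })

    proposed := objchange.ProposedNewObject(schema, prior, config)

    // "ami" takes the configured value; "id" keeps its prior value.
    fmt.Println(proposed.GetAttr("ami").AsString()) // ami-5678
    fmt.Println(proposed.GetAttr("id").AsString())  // i-abc123
}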
+ resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + PriorState: priorVal, + ProposedNewState: proposedNewVal, + PriorPrivate: priorPrivate, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + + plannedNewVal := resp.PlannedState + plannedPrivate := resp.PlannedPrivate + + if plannedNewVal == cty.NilVal { + // Should never happen. Since real-world providers return via RPC a nil + // is always a bug in the client-side stub. This is more likely caused + // by an incompletely-configured mock provider in tests, though. + panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String())) + } + + // We allow the planned new value to disagree with configuration _values_ + // here, since that allows the provider to do special logic like a + // DiffSuppressFunc, but we still require that the provider produces + // a value whose type conforms to the schema. + for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. + var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + return nil, diags.Err() + } } - diffState.init() - // Diff! 
- diff, err := provider.Diff(n.Info, diffState, config) - if err != nil { - return nil, err - } - if diff == nil { - diff = new(InstanceDiff) + { + var moreDiags tfdiags.Diagnostics + plannedNewVal, moreDiags = n.processIgnoreChanges(priorVal, plannedNewVal) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, diags.Err() + } } - // Set DestroyDeposed if we have deposed instances - _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) { - if len(rs.Deposed) > 0 { - diff.DestroyDeposed = true - } + // The provider produces a list of paths to attributes whose changes mean + // that we must replace rather than update an existing remote object. + // However, we only need to do that if the identified attributes _have_ + // actually changed -- particularly after we may have undone some of the + // changes in processIgnoreChanges -- so now we'll filter that list to + // include only where changes are detected. + reqRep := cty.NewPathSet() + if len(resp.RequiresReplace) > 0 { + for _, path := range resp.RequiresReplace { + if priorVal.IsNull() { + // If prior is null then we don't expect any RequiresReplace at all, + // because this is a Create action. + continue + } - return nil, nil - }) - if err != nil { - return nil, err - } + priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil) + plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil) + if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() { + // This means the path was invalid in both the prior and new + // values, which is an error with the provider itself. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, path, + ), + )) + continue + } - // Preserve the DestroyTainted flag - if n.Diff != nil { - diff.SetTainted((*n.Diff).GetDestroyTainted()) - } + // Make sure we have valid Values for both values. + // Note: if the opposing value was of the type + // cty.DynamicPseudoType, the type assigned here may not exactly + // match the schema. This is fine here, since we're only going to + // check for equality, but if the NullVal is to be used, we need to + // check the schema for the true type. + switch { + case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal: + // this should never happen without ApplyPath errors above + panic("requires replace path returned 2 nil values") + case priorChangedVal == cty.NilVal: + priorChangedVal = cty.NullVal(plannedChangedVal.Type()) + case plannedChangedVal == cty.NilVal: + plannedChangedVal = cty.NullVal(priorChangedVal.Type()) + } - // Require a destroy if there is an ID and it requires new.
- if diff.RequiresNew() && state != nil && state.ID != "" { - diff.SetDestroy(true) + eqV := plannedChangedVal.Equals(priorChangedVal) + if !eqV.IsKnown() || eqV.False() { + reqRep.Add(path) + } + } + if diags.HasErrors() { + return nil, diags.Err() + } } - // If we're creating a new resource, compute its ID - if diff.RequiresNew() || state == nil || state.ID == "" { - var oldID string - if state != nil { - oldID = state.Attributes["id"] + eqV := plannedNewVal.Equals(priorVal) + eq := eqV.IsKnown() && eqV.True() + + var action plans.Action + switch { + case priorVal.IsNull(): + action = plans.Create + case eq: + action = plans.NoOp + case !reqRep.Empty(): + // If there are any "requires replace" paths left _after our filtering + // above_ then this is a replace action. + if n.CreateBeforeDestroy { + action = plans.CreateThenDelete + } else { + action = plans.DeleteThenCreate } - - // Add diff to compute new ID - diff.init() - diff.SetAttribute("id", &ResourceAttrDiff{ - Old: oldID, - NewComputed: true, - RequiresNew: true, - Type: DiffAttrOutput, + default: + action = plans.Update + // "Delete" is never chosen here, because deletion plans are always + // created more directly elsewhere, such as in "orphan" handling. + } + + if action.IsReplace() { + // In this strange situation we want to produce a change object that + // shows our real prior object but has a _new_ object that is built + // from a null prior object, since we're going to delete the one + // that has all the computed values on it. + // + // Therefore we'll ask the provider to plan again here, giving it + // a null object for the prior, and then we'll meld that with the + // _actual_ prior state to produce a correctly-shaped replace change. + // The resulting change should show any computed attributes changing + // from known prior values to unknown values, unless the provider is + // able to predict new values for any of these computed attributes. + nullPriorVal := cty.NullVal(schema.ImpliedType()) + + // create a new proposed value from the null state and the config + proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal) + + resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + PriorState: nullPriorVal, + ProposedNewState: proposedNewVal, + PriorPrivate: plannedPrivate, }) + // We need to tread carefully here, since if there are any warnings + // in here they probably also came out of our previous call to + // PlanResourceChange above, and so we don't want to repeat them. + // Consequently, we break from the usual pattern here and only + // append these new diagnostics if there's at least one error inside. 
+ if resp.Diagnostics.HasErrors() { + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + return nil, diags.Err() + } + plannedNewVal = resp.PlannedState + plannedPrivate = resp.PlannedPrivate + for _, err := range schema.ImpliedType().TestConformance(plannedNewVal.Type()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } } - // filter out ignored resources - if err := n.processIgnoreChanges(diff); err != nil { - return nil, err + // If our prior value was tainted then we actually want this to appear + // as a replace change, even though so far we've been treating it as a + // create. + if action == plans.Create && priorValTainted != cty.NilVal { + if n.CreateBeforeDestroy { + action = plans.CreateThenDelete + } else { + action = plans.DeleteThenCreate + } + priorVal = priorValTainted + } + + // As a special case, if we have a previous diff (presumably from the plan + // phases, whereas we're now in the apply phase) and it was for a replace, + // we've already deleted the original object from state by the time we + // get here and so we would've ended up with a _create_ action this time, + // which we now need to paper over to get a result consistent with what + // we originally intended. + if n.PreviousDiff != nil { + prevChange := *n.PreviousDiff + if prevChange.Action.IsReplace() && action == plans.Create { + log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action) + action = prevChange.Action + priorVal = prevChange.Before + } } // Call post-refresh hook if !n.Stub { - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(n.Info, diff) + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal) }) if err != nil { return nil, err @@ -177,30 +421,135 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { } // Update our output if we care - if n.OutputDiff != nil { - *n.OutputDiff = diff + if n.OutputChange != nil { + *n.OutputChange = &plans.ResourceInstanceChange{ + Addr: absAddr, + Private: plannedPrivate, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: action, + Before: priorVal, + After: plannedNewVal, + }, + RequiredReplace: reqRep, + } + } + + if n.OutputValue != nil { + *n.OutputValue = configVal } // Update the state if we care if n.OutputState != nil { - *n.OutputState = state - - // Merge our state so that the state is updated with our plan - if !diff.Empty() && n.OutputState != nil { - *n.OutputState = state.MergeDiff(diff) + *n.OutputState = &states.ResourceInstanceObject{ + // We use the special "planned" status here to note that this + // object's value is not yet complete. Objects with this status + // cannot be used during expression evaluation, so the caller + // must _also_ record the returned change in the active plan, + // which the expression evaluator will use in preference to this + // incomplete value recorded in the state. 
+ Status: states.ObjectPlanned, + Value: plannedNewVal, } } return nil, nil } -func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { - if diff == nil || n.Resource == nil || n.Resource.Id() == "" { +func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) { + // ignore_changes only applies when an object already exists, since we + // can't ignore changes to a thing we've not created yet. + if prior.IsNull() { + return proposed, nil + } + + ignoreChanges := n.Config.Managed.IgnoreChanges + ignoreAll := n.Config.Managed.IgnoreAllChanges + + if len(ignoreChanges) == 0 && !ignoreAll { + return proposed, nil + } + if ignoreAll { + return prior, nil + } + if prior.IsNull() || proposed.IsNull() { + // Ignore changes doesn't apply when we're creating for the first time. + // Proposed should never be null here, but if it is then we'll just let it be. + return proposed, nil + } + + return processIgnoreChangesIndividual(prior, proposed, ignoreChanges) +} + +func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) { + // When we walk below we will be using cty.Path values for comparison, so + // we'll convert our traversals here so we can compare more easily. + ignoreChangesPath := make([]cty.Path, len(ignoreChanges)) + for i, traversal := range ignoreChanges { + path := make(cty.Path, len(traversal)) + for si, step := range traversal { + switch ts := step.(type) { + case hcl.TraverseRoot: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseAttr: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseIndex: + path[si] = cty.IndexStep{ + Key: ts.Key, + } + default: + panic(fmt.Sprintf("unsupported traversal step %#v", step)) + } + } + ignoreChangesPath[i] = path + } + + var diags tfdiags.Diagnostics + ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) { + // First we must see if this is a path that's being ignored at all. + // We're looking for an exact match here because this walk will visit + // leaf values first and then their containers, and we want to do + // the "ignore" transform once we reach the point indicated, throwing + // away any deeper values we already produced at that point. + var ignoreTraversal hcl.Traversal + for i, candidate := range ignoreChangesPath { + if reflect.DeepEqual(path, candidate) { + ignoreTraversal = ignoreChanges[i] + } + } + if ignoreTraversal == nil { + return v, nil + } + + // If we're able to follow the same path through the prior value, + // we'll take the value there instead, effectively undoing the + // change that was planned. + priorV, diags := hcl.ApplyPath(prior, path, nil) + if diags.HasErrors() { + // We just ignore the errors and move on here, since we assume it's + // just because the prior value was a slightly-different shape. + // It could potentially also be that the traversal doesn't match + // the schema, but we should've caught that during the validate + // walk if so. 
+ return v, nil + } + return priorV, nil + }) + return ret, diags +} + +func (n *EvalDiff) processIgnoreChangesOld(diff *InstanceDiff) error { + if diff == nil || n.Config == nil || n.Config.Managed == nil { return nil } - ignoreChanges := n.Resource.Lifecycle.IgnoreChanges + ignoreChanges := n.Config.Managed.IgnoreChanges + ignoreAll := n.Config.Managed.IgnoreAllChanges - if len(ignoreChanges) == 0 { + if len(ignoreChanges) == 0 && !ignoreAll { return nil } @@ -220,9 +569,14 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { // get the complete set of keys we want to ignore ignorableAttrKeys := make(map[string]bool) - for _, ignoredKey := range ignoreChanges { - for k := range attrs { - if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) { + for k := range attrs { + if ignoreAll { + ignorableAttrKeys[k] = true + continue + } + for _, ignoredTraversal := range ignoreChanges { + ignoredKey := legacyFlatmapKeyForTraversal(ignoredTraversal) + if k == ignoredKey || strings.HasPrefix(k, ignoredKey+".") { ignorableAttrKeys[k] = true } } @@ -285,14 +639,56 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { // If we didn't hit any of our early exit conditions, we can filter the diff. for k := range ignorableAttrKeys { - log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s", - n.Resource.Id(), k) + log.Printf("[DEBUG] [EvalIgnoreChanges] %s: Ignoring diff attribute: %s", n.Addr.String(), k) diff.DelAttribute(k) } return nil } +// legacyFlatmapKeyForTraversal constructs a key string compatible with what +// the flatmap package would generate for an attribute addressable by the given +// traversal. +// +// This is used only to shim references to attributes within the diff and +// state structures, which have not (at the time of writing) yet been updated +// to use the newer HCL-based representations. +func legacyFlatmapKeyForTraversal(traversal hcl.Traversal) string { + var buf bytes.Buffer + first := true + for _, step := range traversal { + if !first { + buf.WriteByte('.') + } + switch ts := step.(type) { + case hcl.TraverseRoot: + buf.WriteString(ts.Name) + case hcl.TraverseAttr: + buf.WriteString(ts.Name) + case hcl.TraverseIndex: + val := ts.Key + switch val.Type() { + case cty.Number: + bf := val.AsBigFloat() + buf.WriteString(bf.String()) + case cty.String: + s := val.AsString() + buf.WriteString(s) + default: + // should never happen, since no other types appear in + // traversals in practice. + buf.WriteByte('?') + } + default: + // should never happen, since we've covered all of the types + // that show up in parsed traversals in practice. + buf.WriteByte('?') + } + first = false + } + return buf.String() +} + // a group of key-*ResourceAttrDiff pairs from the same flatmapped container type flatAttrDiff map[string]*ResourceAttrDiff @@ -343,159 +739,213 @@ func groupContainers(d *InstanceDiff) map[string]flatAttrDiff { // EvalDiffDestroy is an EvalNode implementation that returns a plain // destroy diff.
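To make the path-based filtering in processIgnoreChangesIndividual above concrete: cty.Transform walks every value in the proposed object, and at each path that matches an ignored traversal the prior value is spliced back in. A minimal self-contained sketch of the same technique (attribute names invented for illustration):

package main

import (
    "fmt"
    "reflect"

    "github.com/zclconf/go-cty/cty"
)

func main() {
    prior := cty.ObjectVal(map[string]cty.Value{
        "ami":  cty.StringVal("ami-1234"),
        "tags": cty.MapVal(map[string]cty.Value{"env": cty.StringVal("prod")}),
    })
    proposed := cty.ObjectVal(map[string]cty.Value{
        "ami":  cty.StringVal("ami-5678"),
        "tags": cty.MapVal(map[string]cty.Value{"env": cty.StringVal("dev")}),
    })

    // As if the configuration had ignore_changes = [tags].
    ignore := cty.Path{cty.GetAttrStep{Name: "tags"}}

    result, _ := cty.Transform(proposed, func(p cty.Path, v cty.Value) (cty.Value, error) {
        if !reflect.DeepEqual(p, ignore) {
            return v, nil
        }
        // Take the prior value at the ignored path, undoing the change.
        priorV, err := ignore.Apply(prior)
        if err != nil {
            return v, nil // prior has a different shape; keep proposed
        }
        return priorV, nil
    })

    fmt.Println(result.GetAttr("ami").AsString())                              // ami-5678
    fmt.Println(result.GetAttr("tags").Index(cty.StringVal("env")).AsString()) // prod
}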
type EvalDiffDestroy struct { - Info *InstanceInfo - State **InstanceState - Output **InstanceDiff + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + State **states.ResourceInstanceObject + ProviderAddr addrs.AbsProviderConfig + + Output **plans.ResourceInstanceChange + OutputState **states.ResourceInstanceObject } // TODO: test func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) state := *n.State - // If there is no state or we don't have an ID, we're already destroyed - if state == nil || state.ID == "" { + if n.ProviderAddr.ProviderConfig.Type == "" { + if n.DeposedKey == "" { + panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr)) + } else { + panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey)) + } + } + + // If there is no state or our attributes object is null then we're already + // destroyed. + if state == nil || state.Value.IsNull() { return nil, nil } // Call pre-diff hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.Info, state) + return h.PreDiff( + absAddr, n.DeposedKey.Generation(), + state.Value, + cty.NullVal(cty.DynamicPseudoType), + ) }) if err != nil { return nil, err } - // The diff - diff := &InstanceDiff{Destroy: true} + // Change is always the same for a destroy. We don't need the provider's + // help for this one. + // TODO: Should we give the provider an opportunity to veto this? + change := &plans.ResourceInstanceChange{ + Addr: absAddr, + DeposedKey: n.DeposedKey, + Change: plans.Change{ + Action: plans.Delete, + Before: state.Value, + After: cty.NullVal(cty.DynamicPseudoType), + }, + ProviderAddr: n.ProviderAddr, + } // Call post-diff hook err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(n.Info, diff) + return h.PostDiff( + absAddr, + n.DeposedKey.Generation(), + change.Action, + change.Before, + change.After, + ) }) if err != nil { return nil, err } // Update our output - *n.Output = diff - - return nil, nil -} - -// EvalDiffDestroyModule is an EvalNode implementation that writes the diff to -// the full diff. -type EvalDiffDestroyModule struct { - Path []string -} - -// TODO: test -func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) { - diff, lock := ctx.Diff() - - // Acquire the lock so that we can do this safely concurrently - lock.Lock() - defer lock.Unlock() + *n.Output = change - // Write the diff - modDiff := diff.ModuleByPath(n.Path) - if modDiff == nil { - modDiff = diff.AddModule(n.Path) + if n.OutputState != nil { + // Record our proposed new state, which is nil because we're destroying. + *n.OutputState = nil } - modDiff.Destroy = true return nil, nil } -// EvalFilterDiff is an EvalNode implementation that filters the diff -// according to some filter. -type EvalFilterDiff struct { - // Input and output - Diff **InstanceDiff - Output **InstanceDiff - - // Destroy, if true, will only include a destroy diff if it is set. - Destroy bool +// EvalReduceDiff is an EvalNode implementation that takes a planned resource +// instance change as might be produced by EvalDiff or EvalDiffDestroy and +// "simplifies" it to a single atomic action to be performed by a specific +// graph node. +// +// Callers must specify whether they are a destroy node or a regular apply +// node. 
If the result is NoOp then the given change requires no action for +// the specific graph node calling this and so evaluation of that graph +// node should exit early and take no action. +// +// The object written to OutChange may either be identical to InChange or +// a new change object derived from InChange. Because of the former case, the +// caller must not mutate the object returned in OutChange. +type EvalReduceDiff struct { + Addr addrs.ResourceInstance + InChange **plans.ResourceInstanceChange + Destroy bool + OutChange **plans.ResourceInstanceChange } -func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) { - if *n.Diff == nil { - return nil, nil - } - - input := *n.Diff - result := new(InstanceDiff) - - if n.Destroy { - if input.GetDestroy() || input.RequiresNew() { - result.SetDestroy(true) +// TODO: test +func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) { + in := *n.InChange + out := in.Simplify(n.Destroy) + if n.OutChange != nil { + *n.OutChange = out + } + if out.Action != in.Action { + if n.Destroy { + log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action) + } else { + log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action) } } - - if n.Output != nil { - *n.Output = result - } - return nil, nil } -// EvalReadDiff is an EvalNode implementation that writes the diff to -// the full diff. +// EvalReadDiff is an EvalNode implementation that retrieves the planned +// change for a particular resource instance object. type EvalReadDiff struct { - Name string - Diff **InstanceDiff + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + ProviderSchema **ProviderSchema + Change **plans.ResourceInstanceChange } func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { - diff, lock := ctx.Diff() + providerSchema := *n.ProviderSchema + changes := ctx.Changes() + addr := n.Addr.Absolute(ctx.Path()) - // Acquire the lock so that we can do this safely concurrently - lock.Lock() - defer lock.Unlock() + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } - // Write the diff - modDiff := diff.ModuleByPath(ctx.Path()) - if modDiff == nil { + gen := states.CurrentGen + if n.DeposedKey != states.NotDeposed { + gen = n.DeposedKey + } + csrc := changes.GetResourceInstanceChange(addr, gen) + if csrc == nil { + log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr) return nil, nil } - *n.Diff = modDiff.Resources[n.Name] + change, err := csrc.Decode(schema.ImpliedType()) + if err != nil { + return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err) + } + if n.Change != nil { + *n.Change = change + } + + log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr) return nil, nil } -// EvalWriteDiff is an EvalNode implementation that writes the diff to -// the full diff. +// EvalWriteDiff is an EvalNode implementation that saves a planned change +// for an instance object into the set of global planned changes.
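The csrc.Decode call above and the change.Encode call in EvalWriteDiff below shuttle between fully-typed change objects and the serialized values held in the plan. Terraform stores these dynamic values as msgpack annotated with the schema's implied type; a rough sketch of that round trip with go-cty's msgpack package (the object type is illustrative):

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
    "github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
    ty := cty.Object(map[string]cty.Type{"ami": cty.String})
    val := cty.ObjectVal(map[string]cty.Value{"ami": cty.StringVal("ami-1234")})

    // Serialize against the implied type, as Encode does with
    // schema.ImpliedType() above.
    b, err := msgpack.Marshal(val, ty)
    if err != nil {
        panic(err)
    }

    // Later, Decode reverses this using the same type.
    got, err := msgpack.Unmarshal(b, ty)
    if err != nil {
        panic(err)
    }
    fmt.Println(got.RawEquals(val)) // true
}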
type EvalWriteDiff struct { - Name string - Diff **InstanceDiff + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + ProviderSchema **ProviderSchema + Change **plans.ResourceInstanceChange } // TODO: test func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { - diff, lock := ctx.Diff() - - // The diff to write, if its empty it should write nil - var diffVal *InstanceDiff - if n.Diff != nil { - diffVal = *n.Diff + changes := ctx.Changes() + addr := n.Addr.Absolute(ctx.Path()) + if n.Change == nil || *n.Change == nil { + // Caller sets nil to indicate that we need to remove a change from + // the set of changes. + gen := states.CurrentGen + if n.DeposedKey != states.NotDeposed { + gen = n.DeposedKey + } + changes.RemoveResourceInstanceChange(addr, gen) + return nil, nil } - if diffVal.Empty() { - diffVal = nil + + providerSchema := *n.ProviderSchema + change := *n.Change + + if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey { + // Should never happen, and indicates a bug in the caller. + panic("inconsistent address and/or deposed key in EvalWriteDiff") } - // Acquire the lock so that we can do this safely concurrently - lock.Lock() - defer lock.Unlock() + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } - // Write the diff - modDiff := diff.ModuleByPath(ctx.Path()) - if modDiff == nil { - modDiff = diff.AddModule(ctx.Path()) + csrc, err := change.Encode(schema.ImpliedType()) + if err != nil { + return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err) } - if diffVal != nil { - modDiff.Resources[n.Name] = diffVal + + changes.AppendResourceInstanceChange(csrc) + if n.DeposedKey == states.NotDeposed { + log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr) } else { - delete(modDiff.Resources, n.Name) + log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey) } return nil, nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go index 62cc581fad..a60f4a0a29 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go @@ -2,47 +2,63 @@ package terraform import ( "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // EvalImportState is an EvalNode implementation that performs an // ImportState operation on a provider. This will return the imported // states but won't modify any actual state. 
type EvalImportState struct { - Provider *ResourceProvider - Info *InstanceInfo - Id string - Output *[]*InstanceState + Addr addrs.ResourceInstance + Provider *providers.Interface + ID string + Output *[]providers.ImportedResource } // TODO: test func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) provider := *n.Provider + var diags tfdiags.Diagnostics { // Call pre-import hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreImportState(n.Info, n.Id) + return h.PreImportState(absAddr, n.ID) }) if err != nil { return nil, err } } - // Import! - state, err := provider.ImportState(n.Info, n.Id) - if err != nil { - return nil, fmt.Errorf( - "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err) + resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: n.Addr.Resource.Type, + ID: n.ID, + }) + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.Err() + } + + imported := resp.ImportedResources + + for _, obj := range imported { + log.Printf("[TRACE] EvalImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName) } if n.Output != nil { - *n.Output = state + *n.Output = imported } { // Call post-import hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostImportState(n.Info, state) + return h.PostImportState(absAddr, imported) }) if err != nil { return nil, err @@ -55,22 +71,25 @@ func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { // EvalImportStateVerify verifies the state after ImportState and // after the refresh to make sure it is non-nil and valid. type EvalImportStateVerify struct { - Info *InstanceInfo - Id string - State **InstanceState + Addr addrs.ResourceInstance + State **states.ResourceInstanceObject } // TODO: test func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + state := *n.State - if state.Empty() { - return nil, fmt.Errorf( - "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+ - "exist. Please verify the ID is correct. You cannot import non-existent\n"+ - "resources using Terraform import.", - n.Info.HumanId(), - n.Id) + if state.Value.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot import non-existent remote object", + fmt.Sprintf( + "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.", + n.Addr.String(), + ), + )) } - return nil, nil + return nil, diags.ErrWithWarnings() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go deleted file mode 100644 index 6a78a6bbb9..0000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go +++ /dev/null @@ -1,56 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/config" -) - -// EvalInterpolate is an EvalNode implementation that takes a raw -// configuration and interpolates it. 
-type EvalInterpolate struct { - Config *config.RawConfig - Resource *Resource - Output **ResourceConfig - ContinueOnErr bool -} - -func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) { - rc, err := ctx.Interpolate(n.Config, n.Resource) - if err != nil { - if n.ContinueOnErr { - log.Printf("[WARN] Interpolation %q failed: %s", n.Config.Key, err) - return nil, EvalEarlyExitError{} - } - return nil, err - } - - if n.Output != nil { - *n.Output = rc - } - - return nil, nil -} - -// EvalInterpolateProvider is an EvalNode implementation that takes a -// ProviderConfig and interpolates it. Provider configurations are the only -// "inherited" type of configuration we have, and the original raw config may -// have a different interpolation scope. -type EvalInterpolateProvider struct { - Config *config.ProviderConfig - Resource *Resource - Output **ResourceConfig -} - -func (n *EvalInterpolateProvider) Eval(ctx EvalContext) (interface{}, error) { - rc, err := ctx.InterpolateProvider(n.Config, n.Resource) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = rc - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go b/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go new file mode 100644 index 0000000000..0c051f76ab --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go @@ -0,0 +1,61 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/addrs" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// EvalConfigBlock is an EvalNode implementation that takes a raw +// configuration block and evaluates any expressions within it. +// +// ExpandedConfig is populated with the result of expanding any "dynamic" +// blocks in the given body, which can be useful for extracting correct source +// location information for specific attributes in the result. +type EvalConfigBlock struct { + Config *hcl.Body + Schema *configschema.Block + SelfAddr addrs.Referenceable + Output *cty.Value + ExpandedConfig *hcl.Body + ContinueOnErr bool +} + +func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) { + val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr, EvalDataForNoInstanceKey) + if diags.HasErrors() && n.ContinueOnErr { + log.Printf("[WARN] Block evaluation failed: %s", diags.Err()) + return nil, EvalEarlyExitError{} + } + + if n.Output != nil { + *n.Output = val + } + if n.ExpandedConfig != nil { + *n.ExpandedConfig = body + } + + return nil, diags.ErrWithWarnings() +} + +// EvalConfigExpr is an EvalNode implementation that takes a raw configuration +// expression and evaluates it. 
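// NOTE: Illustrative sketch (editorial, not part of the vendored diff): how a caller
// might drive EvalConfigBlock above, assuming the body and schema are already loaded;
// all names other than the EvalNode fields are hypothetical.
//
//	var out cty.Value
//	node := &EvalConfigBlock{
//		Config: &body,  // *hcl.Body from configuration
//		Schema: schema, // *configschema.Block for this block type
//		Output: &out,   // receives the evaluated object value
//	}
//	if _, err := node.Eval(ctx); err != nil {
//		// diagnostics surface here via diags.ErrWithWarnings()
//	}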
+type EvalConfigExpr struct { + Expr hcl.Expression + SelfAddr addrs.Referenceable + Output *cty.Value +} + +func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) { + val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr) + + if n.Output != nil { + *n.Output = val + } + + return nil, diags.ErrWithWarnings() +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go index a4b2a50598..bad9ac5b17 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go @@ -3,56 +3,55 @@ package terraform import ( "fmt" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/tfdiags" ) // EvalLocal is an EvalNode implementation that evaluates the // expression for a local value and writes it into a transient part of // the state. type EvalLocal struct { - Name string - Value *config.RawConfig + Addr addrs.LocalValue + Expr hcl.Expression } func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { - cfg, err := ctx.Interpolate(n.Value, nil) - if err != nil { - return nil, fmt.Errorf("local.%s: %s", n.Name, err) + var diags tfdiags.Diagnostics + + // We ignore diags here because any problems we might find will be found + // again in EvaluateExpr below. + refs, _ := lang.ReferencesInExpr(n.Expr) + for _, ref := range refs { + if ref.Subject == n.Addr { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Self-referencing local value", + Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", n.Addr), + Subject: ref.SourceRange.ToHCL().Ptr(), + Context: n.Expr.Range().Ptr(), + }) + } } - - state, lock := ctx.State() - if state == nil { - return nil, fmt.Errorf("cannot write local value to nil state") + if diags.HasErrors() { + return nil, diags.Err() } - // Get a write lock so we can access the state - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, create it. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - mod = state.AddModule(ctx.Path()) + val, moreDiags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, diags.Err() } - // Get the value from the config - var valueRaw interface{} = config.UnknownVariableValue - if cfg != nil { - var ok bool - valueRaw, ok = cfg.Get("value") - if !ok { - valueRaw = "" - } - if cfg.IsComputed("value") { - valueRaw = config.UnknownVariableValue - } + state := ctx.State() + if state == nil { + return nil, fmt.Errorf("cannot write local value to nil state") } - if mod.Locals == nil { - // initialize - mod.Locals = map[string]interface{}{} - } - mod.Locals[n.Name] = valueRaw + state.SetLocalValue(n.Addr.Absolute(ctx.Path()), val) return nil, nil } @@ -61,26 +60,15 @@ func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { // from the state. Locals aren't persisted, but we don't need to evaluate them // during destroy. 
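// NOTE: Illustrative sketch (editorial, not part of the vendored diff): the
// self-reference guard used by EvalLocal above, extracted as a hypothetical helper.
// References are scanned up front so the error can point at the exact spot where the
// local value names itself:
//
//	func checkSelfRef(addr addrs.LocalValue, expr hcl.Expression) tfdiags.Diagnostics {
//		var diags tfdiags.Diagnostics
//		refs, _ := lang.ReferencesInExpr(expr) // diags ignored; EvaluateExpr re-reports them
//		for _, ref := range refs {
//			if ref.Subject == addr {
//				diags = diags.Append(&hcl.Diagnostic{
//					Severity: hcl.DiagError,
//					Summary:  "Self-referencing local value",
//					Subject:  ref.SourceRange.ToHCL().Ptr(),
//				})
//			}
//		}
//		return diags
//	}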
type EvalDeleteLocal struct { - Name string + Addr addrs.LocalValue } func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() + state := ctx.State() if state == nil { return nil, nil } - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, create it. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - delete(mod.Locals, n.Name) - + state.RemoveLocalValue(n.Addr.Absolute(ctx.Path())) return nil, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go index a8346276f3..10573971fb 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go @@ -4,131 +4,132 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" ) // EvalDeleteOutput is an EvalNode implementation that deletes an output // from the state. type EvalDeleteOutput struct { - Name string + Addr addrs.OutputValue } // TODO: test func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() + state := ctx.State() if state == nil { return nil, nil } - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, create it. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - delete(mod.Outputs, n.Name) - + state.RemoveOutputValue(n.Addr.Absolute(ctx.Path())) return nil, nil } // EvalWriteOutput is an EvalNode implementation that writes the output // for the given name to the current state. type EvalWriteOutput struct { - Name string + Addr addrs.OutputValue Sensitive bool - Value *config.RawConfig + Expr hcl.Expression // ContinueOnErr allows interpolation to fail during Input ContinueOnErr bool } // TODO: test func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { - // This has to run before we have a state lock, since interpolation also + addr := n.Addr.Absolute(ctx.Path()) + + // This has to run before we have a state lock, since evaluation also // reads the state - cfg, err := ctx.Interpolate(n.Value, nil) - // handle the error after we have the module from the state + val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) + // We'll handle errors below, after we have loaded the module. - state, lock := ctx.State() + state := ctx.State() if state == nil { - return nil, fmt.Errorf("cannot write state to nil state") + return nil, nil } - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - // Look for the module state. If we don't have one, create it. 
- mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - mod = state.AddModule(ctx.Path()) - } + changes := ctx.Changes() // may be nil, if we're not working on a changeset // handling the interpolation error - if err != nil { + if diags.HasErrors() { if n.ContinueOnErr || flagWarnOutputErrors { - log.Printf("[ERROR] Output interpolation %q failed: %s", n.Name, err) + log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err()) // if we're continuing, make sure the output is included, and - // marked as unknown - mod.Outputs[n.Name] = &OutputState{ - Type: "string", - Value: config.UnknownVariableValue, - } + // marked as unknown. If the evaluator was able to find a type + // for the value in spite of the error then we'll use it. + n.setValue(addr, state, changes, cty.UnknownVal(val.Type())) return nil, EvalEarlyExitError{} } - return nil, err + return nil, diags.Err() } - // Get the value from the config - var valueRaw interface{} = config.UnknownVariableValue - if cfg != nil { - var ok bool - valueRaw, ok = cfg.Get("value") - if !ok { - valueRaw = "" - } - if cfg.IsComputed("value") { - valueRaw = config.UnknownVariableValue - } + n.setValue(addr, state, changes, val) + + return nil, nil +} + +func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { + if val.IsKnown() && !val.IsNull() { + // The state itself doesn't represent unknown values, so we null them + // out here and then we'll save the real unknown value in the planned + // changeset below, if we have one on this graph walk. + log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr) + stateVal := cty.UnknownAsNull(val) + state.SetOutputValue(addr, stateVal, n.Sensitive) + } else { + log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr) + state.RemoveOutputValue(addr) } - switch valueTyped := valueRaw.(type) { - case string: - mod.Outputs[n.Name] = &OutputState{ - Type: "string", - Sensitive: n.Sensitive, - Value: valueTyped, - } - case []interface{}: - mod.Outputs[n.Name] = &OutputState{ - Type: "list", - Sensitive: n.Sensitive, - Value: valueTyped, - } - case map[string]interface{}: - mod.Outputs[n.Name] = &OutputState{ - Type: "map", - Sensitive: n.Sensitive, - Value: valueTyped, - } - case []map[string]interface{}: - // an HCL map is multi-valued, so if this was read out of a config the - // map may still be in a slice. - if len(valueTyped) == 1 { - mod.Outputs[n.Name] = &OutputState{ - Type: "map", + // If we also have an active changeset then we'll replicate the value in + // there. This is used in preference to the state where present, since it + // *is* able to represent unknowns, while the state cannot. + if changes != nil { + // For the moment we are not properly tracking changes to output + // values, and just marking them always as "Create" or "Destroy" + // actions. A future release will rework the output lifecycle so we + // can track their changes properly, in a similar way to how we work + // with resource instances. 
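// NOTE: Illustrative sketch (editorial, not part of the vendored diff): the split
// that setValue relies on here. State snapshots cannot represent unknown values, so
// the state side receives cty.UnknownAsNull(val) while the changeset (when present)
// keeps the real, possibly-unknown value:
//
//	if val.IsKnown() && !val.IsNull() {
//		state.SetOutputValue(addr, cty.UnknownAsNull(val), sensitive)
//	} else {
//		state.RemoveOutputValue(addr) // null and unknown both leave no state entry
//	}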
+ + var change *plans.OutputChange + if !val.IsNull() { + change = &plans.OutputChange{ + Addr: addr, Sensitive: n.Sensitive, - Value: valueTyped[0], + Change: plans.Change{ + Action: plans.Create, + Before: cty.NullVal(cty.DynamicPseudoType), + After: val, + }, + } + } else { + change = &plans.OutputChange{ + Addr: addr, + Sensitive: n.Sensitive, + Change: plans.Change{ + // This is just a weird placeholder delete action since + // we don't have an actual prior value to indicate. + // FIXME: Generate real planned changes for output values + // that include the old values. + Action: plans.Delete, + Before: cty.NullVal(cty.DynamicPseudoType), + After: cty.NullVal(cty.DynamicPseudoType), + }, } - break } - return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map", - n.Name, valueTyped, len(valueTyped)) - default: - return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped) - } - return nil, nil + cs, err := change.Encode() + if err != nil { + // Should never happen, since we just constructed this right above + panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err)) + } + log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr) + changes.RemoveOutputChange(addr) // remove any existing planned change, if present + changes.AppendOutputChange(cs) // add the new planned change + } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go index 61f6ff941b..7df6584a1e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go @@ -2,50 +2,86 @@ package terraform import ( "fmt" + "log" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/hcl2/hcl" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" ) -// EvalBuildProviderConfig outputs a *ResourceConfig that is properly -// merged with parents and inputs on top of what is configured in the file. -type EvalBuildProviderConfig struct { - Provider string - Config **ResourceConfig - Output **ResourceConfig -} +func buildProviderConfig(ctx EvalContext, addr addrs.ProviderConfig, config *configs.Provider) hcl.Body { + var configBody hcl.Body + if config != nil { + configBody = config.Config + } -func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) { - cfg := *n.Config - - // If we have an Input configuration set, then merge that in - if input := ctx.ProviderInput(n.Provider); input != nil { - // "input" is a map of the subset of config values that were known - // during the input walk, set by EvalInputProvider. Note that - // in particular it does *not* include attributes that had - // computed values at input time; those appear *only* in - // "cfg" here. 
- rc, err := config.NewRawConfig(input) - if err != nil { - return nil, err - } - - merged := rc.Merge(cfg.raw) - cfg = NewResourceConfig(merged) + var inputBody hcl.Body + inputConfig := ctx.ProviderInput(addr) + if len(inputConfig) > 0 { + inputBody = configs.SynthBody("", inputConfig) } - *n.Output = cfg - return nil, nil + switch { + case configBody != nil && inputBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr) + // Note that the inputBody is the _base_ here, because configs.MergeBodies + // expects the base to have all of the required fields, while these are + // forced to be optional for the override. The input process should + // guarantee that we have a value for each of the required arguments and + // that in practice the sets of attributes in each body will be + // disjoint. + return configs.MergeBodies(inputBody, configBody) + case configBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr) + return configBody + case inputBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr) + return inputBody + default: + log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr) + return hcl.EmptyBody() + } } // EvalConfigProvider is an EvalNode implementation that configures // a provider that is already initialized and retrieved. type EvalConfigProvider struct { - Provider string - Config **ResourceConfig + Addr addrs.ProviderConfig + Provider *providers.Interface + Config *configs.Provider } func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { - return nil, ctx.ConfigureProvider(n.Provider, *n.Config) + if n.Provider == nil { + return nil, fmt.Errorf("EvalConfigProvider Provider is nil") + } + + var diags tfdiags.Diagnostics + provider := *n.Provider + config := n.Config + + configBody := buildProviderConfig(ctx, n.Addr, config) + + resp := provider.GetSchema() + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configSchema := resp.Provider.Block + configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configDiags := ctx.ConfigureProvider(n.Addr, configVal) + configDiags = configDiags.InConfigBody(configBody) + + return nil, configDiags.ErrWithWarnings() } // EvalInitProvider is an EvalNode implementation that initializes a provider @@ -53,85 +89,59 @@ func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { // EvalGetProvider node. type EvalInitProvider struct { TypeName string - Name string + Addr addrs.ProviderConfig } func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvider(n.TypeName, n.Name) + return ctx.InitProvider(n.TypeName, n.Addr) } // EvalCloseProvider is an EvalNode implementation that closes provider // connections that aren't needed anymore. type EvalCloseProvider struct { - Name string + Addr addrs.ProviderConfig } func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvider(n.Name) + ctx.CloseProvider(n.Addr) return nil, nil } // EvalGetProvider is an EvalNode implementation that retrieves an already // initialized provider instance for the given name.
+// +// Unlike most eval nodes, this takes an _absolute_ provider configuration, +// because providers can be passed into and inherited between modules. +// Resource nodes must therefore know the absolute path of the provider they +// will use, which is usually accomplished by implementing +// interface GraphNodeProviderConsumer. type EvalGetProvider struct { - Name string - Output *ResourceProvider + Addr addrs.AbsProviderConfig + Output *providers.Interface + + // If non-nil, Schema will be updated after eval to refer to the + // schema of the provider. + Schema **ProviderSchema } func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { - result := ctx.Provider(n.Name) + if n.Addr.ProviderConfig.Type == "" { + // Should never happen + panic("EvalGetProvider used with uninitialized provider configuration address") + } + + result := ctx.Provider(n.Addr) if result == nil { - return nil, fmt.Errorf("provider %s not initialized", n.Name) + return nil, fmt.Errorf("provider %s not initialized", n.Addr) } if n.Output != nil { *n.Output = result } - return nil, nil -} - -// EvalInputProvider is an EvalNode implementation that asks for input -// for the given provider configurations. -type EvalInputProvider struct { - Name string - Provider *ResourceProvider - Config **ResourceConfig -} - -func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) { - rc := *n.Config - orig := rc.DeepCopy() - - // Wrap the input into a namespace - input := &PrefixUIInput{ - IdPrefix: fmt.Sprintf("provider.%s", n.Name), - QueryPrefix: fmt.Sprintf("provider.%s.", n.Name), - UIInput: ctx.Input(), - } - - // Go through each provider and capture the input necessary - // to satisfy it. - config, err := (*n.Provider).Input(input, rc) - if err != nil { - return nil, fmt.Errorf( - "Error configuring %s: %s", n.Name, err) + if n.Schema != nil { + *n.Schema = ctx.ProviderSchema(n.Addr) } - // We only store values that have changed through Input. - // The goal is to cache cache input responses, not to provide a complete - // config for other providers. - confMap := make(map[string]interface{}) - if config != nil && len(config.Config) > 0 { - // any values that weren't in the original ResourcConfig will be cached - for k, v := range config.Config { - if _, ok := orig.Config[k]; !ok { - confMap[k] = v - } - } - } - - ctx.SetProviderInput(n.Name, confMap) - return nil, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go index 89579c0557..bc6b5cc76d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go @@ -2,6 +2,9 @@ package terraform import ( "fmt" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" ) // EvalInitProvisioner is an EvalNode implementation that initializes a provisioner @@ -30,7 +33,8 @@ func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { // initialized provisioner instance for the given name. 
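// NOTE: Illustrative sketch (editorial, not part of the vendored diff): the merge
// ordering buildProviderConfig above depends on. The synthesized input body is the
// base and the explicit config is the override, so explicit settings win while
// interactively-entered values fill any gaps; inputVals is a hypothetical name for
// the map returned by ctx.ProviderInput:
//
//	inputBody := configs.SynthBody("", inputVals)
//	merged := configs.MergeBodies(inputBody, configBody)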
type EvalGetProvisioner struct { Name string - Output *ResourceProvisioner + Output *provisioners.Interface + Schema **configschema.Block } func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { @@ -43,5 +47,9 @@ func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { *n.Output = result } + if n.Schema != nil { + *n.Schema = ctx.ProvisionerSchema(n.Name) + } + return result, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go index fb85a284e8..d89d02ba91 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go @@ -2,105 +2,439 @@ package terraform import ( "fmt" + "log" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/objchange" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) +// EvalReadData is an EvalNode implementation that deals with the main part +// of the data resource lifecycle: either actually reading from the data source +// or generating a plan to do so. +type EvalReadData struct { + Addr addrs.ResourceInstance + Config *configs.Resource + Dependencies []addrs.Referenceable + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + + // Planned is set when dealing with data resources that were deferred to + // the apply walk, to let us see what was planned. If this is set, the + // evaluation of the config is required to produce a wholly-known + // configuration which is consistent with the partial object included + // in this planned change. + Planned **plans.ResourceInstanceChange + + // ForcePlanRead, if true, overrides the usual behavior of immediately + // reading from the data source where possible, instead forcing us to + // _always_ generate a plan. This is used during the plan walk, since we + // mustn't actually apply anything there. (The resulting state doesn't + // get persisted) + ForcePlanRead bool + + // The result from this EvalNode has a few different possibilities + // depending on the input: + // - If Planned is nil then we assume we're aiming to _produce_ the plan, + // and so the following two outcomes are possible: + // - OutputChange.Action is plans.NoOp and OutputState is the complete + // result of reading from the data source. This is the easy path. + // - OutputChange.Action is plans.Read and OutputState is a planned + // object placeholder (states.ObjectPlanned). In this case, the + // returned change must be recorded in the overall changeset and + // eventually passed to another instance of this struct during the + // apply walk. + // - If Planned is non-nil then we assume we're aiming to complete a + // planned read from an earlier plan walk. In this case the only possible + // non-error outcome is to set Output.Action (if non-nil) to a plans.NoOp + // change and put the complete resulting state in OutputState, ready to + // be saved in the overall state and used for expression evaluation.
+ OutputChange **plans.ResourceInstanceChange + OutputValue *cty.Value + OutputConfigValue *cty.Value + OutputState **states.ResourceInstanceObject +} + +func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadData: working on %s", absAddr) + + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("provider schema not available for %s", n.Addr) + } + + var diags tfdiags.Diagnostics + var change *plans.ResourceInstanceChange + var configVal cty.Value + + // TODO: Do we need to handle Delete changes here? EvalReadDataDiff and + // EvalReadDataApply did, but it seems like we should handle that via a + // separate mechanism since it boils down to just deleting the object from + // the state... and we do that on every plan anyway, forcing the data + // resource to re-read. + + config := *n.Config + provider := *n.Provider + providerSchema := *n.ProviderSchema + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.ProviderConfig.Type, n.Addr.Resource.Type) + } + + // We'll always start by evaluating the configuration. What we do after + // that will depend on the evaluation result along with what other inputs + // we were given. + objTy := schema.ImpliedType() + priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time + + keyData := EvalDataForInstanceKey(n.Addr.Key) + + var configDiags tfdiags.Diagnostics + configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal) + + // If our configuration contains any unknown values then we must defer the + // read to the apply phase by producing a "Read" change for this resource, + // and a placeholder value for it in the state. + if n.ForcePlanRead || !configVal.IsWhollyKnown() { + // If the configuration is still unknown when we're applying a planned + // change then that indicates a bug in Terraform, since we should have + // everything resolved by now. 
+ if n.Planned != nil && *n.Planned != nil { + return nil, fmt.Errorf( + "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", + absAddr, + ) + } + if n.ForcePlanRead { + log.Printf("[TRACE] EvalReadData: %s configuration is fully known, but we're forcing a read plan to be created", absAddr) + } else { + log.Printf("[TRACE] EvalReadData: %s configuration not fully known yet, so deferring to apply phase", absAddr) + } + + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) + }) + if err != nil { + return nil, err + } + + change = &plans.ResourceInstanceChange{ + Addr: absAddr, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: plans.Read, + Before: priorVal, + After: proposedNewVal, + }, + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(absAddr, states.CurrentGen, change.Action, priorVal, proposedNewVal) + }) + if err != nil { + return nil, err + } + + if n.OutputChange != nil { + *n.OutputChange = change + } + if n.OutputValue != nil { + *n.OutputValue = change.After + } + if n.OutputConfigValue != nil { + *n.OutputConfigValue = configVal + } + if n.OutputState != nil { + state := &states.ResourceInstanceObject{ + Value: change.After, + Status: states.ObjectPlanned, // because the partial value in the plan must be used for now + Dependencies: n.Dependencies, + } + *n.OutputState = state + } + + return nil, diags.ErrWithWarnings() + } + + if n.Planned != nil && *n.Planned != nil && (*n.Planned).Action != plans.Read { + // If any other action gets in here then that's always a bug; this + // EvalNode only deals with reading. + return nil, fmt.Errorf( + "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)", + (*n.Planned).Action, absAddr, + ) + } + + // If we get down here then our configuration is complete and we're ready + // to actually call the provider to read the data. + log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr) + + err := ctx.Hook(func(h Hook) (HookAction, error) { + // We don't have a state yet, so we'll just give the hook an + // empty one to work with. + return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) + }) + if err != nil { + return nil, err + } + + resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + newVal := resp.State + if newVal == cty.NilVal { + // This can happen with incompletely-configured mocks. We'll allow it + // and treat it as an alias for a properly-typed null value.
+ newVal = cty.NullVal(schema.ImpliedType()) + } + + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + if newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced null object", + fmt.Sprintf( + "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, + ), + )) + } + if !newVal.IsWhollyKnown() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, + ), + )) + + // We'll still save the object, but we need to eliminate any unknown + // values first because we can't serialize them in the state file. + // Note that this may cause set elements to be coalesced if they + // differed only by having unknown values, but we don't worry about + // that here because we're saving the value only for inspection + // purposes; the error we added above will halt the graph walk. + newVal = cty.UnknownAsNull(newVal) + } + + // Since we've completed the read, we actually have no change to make, but + // we'll produce a NoOp one anyway to preserve the usual flow of the + // plan phase and allow it to produce a complete plan. + change = &plans.ResourceInstanceChange{ + Addr: absAddr, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: plans.NoOp, + Before: newVal, + After: newVal, + }, + } + state := &states.ResourceInstanceObject{ + Value: change.After, + Status: states.ObjectReady, // because we completed the read from the provider + Dependencies: n.Dependencies, + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) + }) + if err != nil { + return nil, err + } + + if n.OutputChange != nil { + *n.OutputChange = change + } + if n.OutputValue != nil { + *n.OutputValue = change.After + } + if n.OutputConfigValue != nil { + *n.OutputConfigValue = configVal + } + if n.OutputState != nil { + *n.OutputState = state + } + + return nil, diags.ErrWithWarnings() +} + // EvalReadDataDiff is an EvalNode implementation that executes a data // resource's ReadDataDiff method to discover what attributes it exports. type EvalReadDataDiff struct { - Provider *ResourceProvider - Output **InstanceDiff - OutputState **InstanceState - Config **ResourceConfig - Info *InstanceInfo + Addr addrs.ResourceInstance + Config *configs.Resource + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + + Output **plans.ResourceInstanceChange + OutputValue *cty.Value + OutputConfigValue *cty.Value + OutputState **states.ResourceInstanceObject // Set Previous when re-evaluating diff during apply, to ensure that // the "Destroy" flag is preserved. 
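// NOTE: Illustrative sketch (editorial, not part of the vendored diff): the
// validation pattern applied to provider-returned values above, extracted as a
// hypothetical helper. Conformance failures are reported as provider bugs:
//
//	func checkProviderValue(v cty.Value, schema *configschema.Block, name string) tfdiags.Diagnostics {
//		var diags tfdiags.Diagnostics
//		for _, err := range v.Type().TestConformance(schema.ImpliedType()) {
//			diags = diags.Append(tfdiags.Sourceless(
//				tfdiags.Error,
//				"Provider produced invalid object",
//				fmt.Sprintf("Provider produced an invalid value: %s.", tfdiags.FormatErrorPrefixed(err, name)),
//			))
//		}
//		return diags
//	}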
- Previous **InstanceDiff + Previous **plans.ResourceInstanceChange } func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) { - // TODO: test + absAddr := n.Addr.Absolute(ctx.Path()) - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.Info, nil) - }) - if err != nil { - return nil, err + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("provider schema not available for %s", n.Addr) } - var diff *InstanceDiff + var diags tfdiags.Diagnostics + var change *plans.ResourceInstanceChange + var configVal cty.Value - if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() { + if n.Previous != nil && *n.Previous != nil && (*n.Previous).Action == plans.Delete { // If we're re-diffing for a diff that was already planning to // destroy, then we'll just continue with that plan. - diff = &InstanceDiff{Destroy: true} + + nullVal := cty.NullVal(cty.DynamicPseudoType) + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(absAddr, states.CurrentGen, nullVal, nullVal) + }) + if err != nil { + return nil, err + } + + change = &plans.ResourceInstanceChange{ + Addr: absAddr, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: plans.Delete, + Before: nullVal, + After: nullVal, + }, + } } else { - provider := *n.Provider config := *n.Config + providerSchema := *n.ProviderSchema + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type) + } + + objTy := schema.ImpliedType() + priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time - var err error - diff, err = provider.ReadDataDiff(n.Info, config) + keyData := EvalDataForInstanceKey(n.Addr.Key) + + var configDiags tfdiags.Diagnostics + configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal) + + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) + }) if err != nil { return nil, err } - if diff == nil { - diff = new(InstanceDiff) - } - // if id isn't explicitly set then it's always computed, because we're - // always "creating a new resource". 
- diff.init() - if _, ok := diff.Attributes["id"]; !ok { - diff.SetAttribute("id", &ResourceAttrDiff{ - Old: "", - NewComputed: true, - RequiresNew: true, - Type: DiffAttrOutput, - }) + change = &plans.ResourceInstanceChange{ + Addr: absAddr, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: plans.Read, + Before: priorVal, + After: proposedNewVal, + }, } } - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(n.Info, diff) + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(absAddr, states.CurrentGen, change.Action, change.Before, change.After) }) if err != nil { return nil, err } - *n.Output = diff + if n.Output != nil { + *n.Output = change + } + if n.OutputValue != nil { + *n.OutputValue = change.After + } + if n.OutputConfigValue != nil { + *n.OutputConfigValue = configVal + } if n.OutputState != nil { - state := &InstanceState{} - *n.OutputState = state - - // Apply the diff to the returned state, so the state includes - // any attribute values that are not computed. - if !diff.Empty() && n.OutputState != nil { - *n.OutputState = state.MergeDiff(diff) + state := &states.ResourceInstanceObject{ + Value: change.After, + Status: states.ObjectReady, } + *n.OutputState = state } - return nil, nil + return nil, diags.ErrWithWarnings() } // EvalReadDataApply is an EvalNode implementation that executes a data // resource's ReadDataApply method to read data from the data source. type EvalReadDataApply struct { - Provider *ResourceProvider - Output **InstanceState - Diff **InstanceDiff - Info *InstanceInfo + Addr addrs.ResourceInstance + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + Output **states.ResourceInstanceObject + Config *configs.Resource + Change **plans.ResourceInstanceChange + StateReferences []addrs.Referenceable } func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { - // TODO: test provider := *n.Provider - diff := *n.Diff + change := *n.Change + providerSchema := *n.ProviderSchema + absAddr := n.Addr.Absolute(ctx.Path()) + + var diags tfdiags.Diagnostics // If the diff is for *destroying* this resource then we'll // just drop its state and move on, since data resources don't // support an actual "destroy" action. - if diff != nil && diff.GetDestroy() { + if change != nil && change.Action == plans.Delete { if n.Output != nil { *n.Output = nil } @@ -113,27 +447,56 @@ func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { err := ctx.Hook(func(h Hook) (HookAction, error) { // We don't have a state yet, so we'll just give the hook an // empty one to work with. 
- return h.PreRefresh(n.Info, &InstanceState{}) + return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) }) if err != nil { return nil, err } - state, err := provider.ReadDataApply(n.Info, diff) - if err != nil { - return nil, fmt.Errorf("%s: %s", n.Info.Id, err) + resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: n.Addr.Resource.Type, + Config: change.After, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type) + } + + newVal := resp.State + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q planned an invalid value for %s. The result could not be saved.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() } err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(n.Info, state) + return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) }) if err != nil { return nil, err } if n.Output != nil { - *n.Output = state + *n.Output = &states.ResourceInstanceObject{ + Value: newVal, + Status: states.ObjectReady, + Dependencies: n.StateReferences, + } } - return nil, nil + return nil, diags.ErrWithWarnings() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go index fa2b8126cd..03bc948115 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go @@ -3,53 +3,102 @@ package terraform import ( "fmt" "log" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // EvalRefresh is an EvalNode implementation that does a refresh for // a resource. 
type EvalRefresh struct { - Provider *ResourceProvider - State **InstanceState - Info *InstanceInfo - Output **InstanceState + Addr addrs.ResourceInstance + ProviderAddr addrs.AbsProviderConfig + Provider *providers.Interface + ProviderSchema **ProviderSchema + State **states.ResourceInstanceObject + Output **states.ResourceInstanceObject } // TODO: test func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { - provider := *n.Provider state := *n.State + absAddr := n.Addr.Absolute(ctx.Path()) + + var diags tfdiags.Diagnostics // If we have no state, we don't do any refreshing if state == nil { - log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id) - return nil, nil + log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path())) + return nil, diags.ErrWithWarnings() + } + + schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) } // Call pre-refresh hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(n.Info, state) + return h.PreRefresh(absAddr, states.CurrentGen, state.Value) }) if err != nil { - return nil, err + return nil, diags.ErrWithWarnings() } // Refresh! - state, err = provider.Refresh(n.Info, state) - if err != nil { - return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error()) + priorVal := state.Value + req := providers.ReadResourceRequest{ + TypeName: n.Addr.Resource.Type, + PriorState: priorVal, } + provider := *n.Provider + resp := provider.ReadResource(req) + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.Err() + } + + if resp.NewState == cty.NilVal { + // This ought not to happen in real cases since it's not possible to + // send NilVal over the plugin RPC channel, but it can come up in + // tests due to sloppy mocking. + panic("new state is cty.NilVal") + } + + for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + newState := state.DeepCopy() + newState.Value = resp.NewState + // Call post-refresh hook err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(n.Info, state) + return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value) }) if err != nil { return nil, err } if n.Output != nil { - *n.Output = state + *n.Output = newState } - return nil, nil + return nil, diags.ErrWithWarnings() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go deleted file mode 100644 index 5eca6782a6..0000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go +++ /dev/null @@ -1,13 +0,0 @@ -package terraform - -// EvalInstanceInfo is an EvalNode implementation that fills in the -// InstanceInfo as much as it can. 
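// NOTE: Illustrative sketch (editorial, not part of the vendored diff): the core of
// the refresh flow above, reduced to the provider call; refreshValue is a
// hypothetical helper name. In the real node the PreRefresh/PostRefresh hooks and
// the schema conformance checks bracket this call:
//
//	func refreshValue(p providers.Interface, typeName string, prior cty.Value) (cty.Value, tfdiags.Diagnostics) {
//		resp := p.ReadResource(providers.ReadResourceRequest{
//			TypeName:   typeName,
//			PriorState: prior,
//		})
//		return resp.NewState, resp.Diagnostics
//	}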
-type EvalInstanceInfo struct { - Info *InstanceInfo -} - -// TODO: test -func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) { - n.Info.ModulePath = ctx.Path() - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go index 82d81782af..3485e4f14d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go @@ -1,22 +1,37 @@ package terraform +import ( + "github.com/hashicorp/terraform/tfdiags" +) + // EvalSequence is an EvalNode that evaluates in sequence. type EvalSequence struct { Nodes []EvalNode } func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + for _, n := range n.Nodes { if n == nil { continue } if _, err := EvalRaw(n, ctx); err != nil { - return nil, err + if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit { + // In this path we abort early, losing any non-error + // diagnostics we saw earlier. + return nil, err + } + diags = diags.Append(err) + if diags.HasErrors() { + // Halt if we get some errors, but warnings are okay. + break + } } } - return nil, nil + return nil, diags.ErrWithWarnings() } // EvalNodeFilterable impl. diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go index 11826907ca..d506ce3fe1 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go @@ -2,91 +2,149 @@ package terraform import ( "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // EvalReadState is an EvalNode implementation that reads the -// primary InstanceState for a specific resource out of the state. +// current object for a specific instance in the state. type EvalReadState struct { - Name string - Output **InstanceState + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance + + // ProviderSchema is the schema for the provider given in Provider. + ProviderSchema **ProviderSchema + + // Provider is the provider that will subsequently perform actions on + // the state object. This is used to perform any schema upgrades + // that might be required to prepare the stored data for use. + Provider *providers.Interface + + // Output will be written with a pointer to the retrieved object. + Output **states.ResourceInstanceObject } func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { - return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { - return rs.Primary, nil - }) + if n.Provider == nil || *n.Provider == nil { + panic("EvalReadState used with no Provider object") + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + panic("EvalReadState used with no ProviderSchema object") + } + + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr) + + src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen) + if src == nil { + // Presumably we only have deposed objects, then.
+ log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr) + return nil, nil + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Shouldn't happen since we should've failed long ago if no schema is present + return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) + } + var diags tfdiags.Diagnostics + src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) + if diags.HasErrors() { + // Note that we don't have any channel to return warnings here. We'll + // accept that for now since warnings during a schema upgrade would + // be pretty weird anyway, since this operation is supposed to seem + // invisible to the user. + return nil, diags.Err() + } + + obj, err := src.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = obj + } + return obj, nil } // EvalReadStateDeposed is an EvalNode implementation that reads the // deposed InstanceState for a specific resource out of the state type EvalReadStateDeposed struct { - Name string - Output **InstanceState - // Index indicates which instance in the Deposed list to target, or -1 for - // the last item. - Index int + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance + + // Key identifies which deposed object we will read. + Key states.DeposedKey + + // ProviderSchema is the schema for the provider given in Provider. + ProviderSchema **ProviderSchema + + // Provider is the provider that will subsequently perform actions on + // the the state object. This is used to perform any schema upgrades + // that might be required to prepare the stored data for use. + Provider *providers.Interface + + // Output will be written with a pointer to the retrieved object. + Output **states.ResourceInstanceObject } func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { - // Get the index. If it is negative, then we get the last one - idx := n.Index - if idx < 0 { - idx = len(rs.Deposed) - 1 - } - if idx >= 0 && idx < len(rs.Deposed) { - return rs.Deposed[idx], nil - } else { - return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs) - } - }) -} + if n.Provider == nil || *n.Provider == nil { + panic("EvalReadStateDeposed used with no Provider object") + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + panic("EvalReadStateDeposed used with no ProviderSchema object") + } -// Does the bulk of the work for the various flavors of ReadState eval nodes. -// Each node just provides a reader function to get from the ResourceState to the -// InstanceState, and this takes care of all the plumbing. -func readInstanceFromState( - ctx EvalContext, - resourceName string, - output **InstanceState, - readerFn func(*ResourceState) (*InstanceState, error), -) (*InstanceState, error) { - state, lock := ctx.State() - - // Get a read lock so we can access this instance - lock.RLock() - defer lock.RUnlock() - - // Look for the module state. If we don't have one, then it doesn't matter. 
- mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil + key := n.Key + if key == states.NotDeposed { + return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported") } + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key) - // Look for the resource state. If we don't have one, then it is okay. - rs := mod.Resources[resourceName] - if rs == nil { + src := ctx.State().ResourceInstanceObject(absAddr, key) + if src == nil { + // Presumably we only have deposed objects, then. + log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key) return nil, nil } - // Use the delegate function to get the instance state from the resource state - is, err := readerFn(rs) + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Shouldn't happen since we should've failed long ago if no schema is present + return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) + } + var diags tfdiags.Diagnostics + src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) + if diags.HasErrors() { + // Note that we don't have any channel to return warnings here. We'll + // accept that for now since warnings during a schema upgrade would + // be pretty weird anyway, since this operation is supposed to seem + // invisible to the user. + return nil, diags.Err() + } + + obj, err := src.Decode(schema.ImpliedType()) if err != nil { return nil, err } - - // Write the result to the output pointer - if output != nil { - *output = is + if n.Output != nil { + *n.Output = obj } - - return is, nil + return obj, nil } -// EvalRequireState is an EvalNode implementation that early exits -// if the state doesn't have an ID. +// EvalRequireState is an EvalNode implementation that exits early if the given +// object is null. type EvalRequireState struct { - State **InstanceState + State **states.ResourceInstanceObject } func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { @@ -95,7 +153,7 @@ func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { } state := *n.State - if state == nil || state.ID == "" { + if state == nil || state.Value.IsNull() { return nil, EvalEarlyExitError{} } @@ -107,12 +165,14 @@ func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { type EvalUpdateStateHook struct{} func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() - - // Get a full lock. Even calling something like WriteState can modify - // (prune) the state, so we need the full lock. - lock.Lock() - defer lock.Unlock() + // In principle we could grab the lock here just long enough to take a + // deep copy and then pass that to our hooks below, but we'll instead + // hold the lock for the duration to avoid the potential confusing + // situation of us racing to call PostStateUpdate concurrently with + // different state snapshots.
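// NOTE: Illustrative sketch (editorial, not part of the vendored diff): the
// lock-and-snapshot pattern the code below implements, assuming ctx.State()
// returns a *states.SyncState as in the surrounding diff:
//
//	stateSync := ctx.State()
//	snapshot := stateSync.Lock().DeepCopy() // Lock returns the live *states.State
//	defer stateSync.Unlock()                // lock stays held while hooks read the snapshot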
+ stateSync := ctx.State() + state := stateSync.Lock().DeepCopy() + defer stateSync.Unlock() // Call the hook err := ctx.Hook(func(h Hook) (HookAction, error) { @@ -125,171 +185,285 @@ func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { return nil, nil } -// EvalWriteState is an EvalNode implementation that writes the -// primary InstanceState for a specific resource into the state. +// EvalWriteState is an EvalNode implementation that saves the given object +// as the current object for the selected resource instance. type EvalWriteState struct { - Name string - ResourceType string - Provider string - Dependencies []string - State **InstanceState + // Addr is the address of the instance to write state for. + Addr addrs.ResourceInstance + + // State is the object state to save. + State **states.ResourceInstanceObject + + // ProviderSchema is the schema for the provider given in ProviderAddr. + ProviderSchema **ProviderSchema + + // ProviderAddr is the address of the provider configuration that + // produced the given object. + ProviderAddr addrs.AbsProviderConfig } func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { - return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, - func(rs *ResourceState) error { - rs.Primary = *n.State - return nil - }, - ) + if n.State == nil { + // Note that a pointer _to_ nil is valid here, indicating the total + // absence of an object as we'd see during destroy. + panic("EvalWriteState used with no ResourceInstanceObject") + } + + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + if n.ProviderAddr.ProviderConfig.Type == "" { + return nil, fmt.Errorf("failed to write state for %s, missing provider type", absAddr) + } + + obj := *n.State + if obj == nil || obj.Value.IsNull() { + // No need to encode anything: we'll just write it directly. + state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr) + log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr) + return nil, nil + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + // Should never happen, unless our state object is nil + panic("EvalWriteState used with pointer to nil ProviderSchema object") + } + + if obj != nil { + log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr) + } else { + log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr) + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // It shouldn't be possible to get this far in any real scenario + // without a schema, but we might end up here in contrived tests that + // fail to set up their world properly. + return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) + } + src, err := obj.Encode(schema.ImpliedType(), currentVersion) + if err != nil { + return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) + } + + state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr) + return nil, nil } // EvalWriteStateDeposed is an EvalNode implementation that writes // an InstanceState out to the Deposed list of a resource in the state. type EvalWriteStateDeposed struct { - Name string - ResourceType string - Provider string - Dependencies []string - State **InstanceState - // Index indicates which instance in the Deposed list to target, or -1 to append. - Index int + // Addr is the address of the instance to write state for.
+ Addr addrs.ResourceInstance + + // Key indicates which deposed object to write to. + Key states.DeposedKey + + // State is the object state to save. + State **states.ResourceInstanceObject + + // ProviderSchema is the schema for the provider given in ProviderAddr. + ProviderSchema **ProviderSchema + + // ProviderAddr is the address of the provider configuration that + // produced the given object. + ProviderAddr addrs.AbsProviderConfig } func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, - func(rs *ResourceState) error { - if n.Index == -1 { - rs.Deposed = append(rs.Deposed, *n.State) - } else { - rs.Deposed[n.Index] = *n.State - } - return nil - }, - ) -} + if n.State == nil { + // Note that a pointer _to_ nil is valid here, indicating the total + // absense of an object as we'd see during destroy. + panic("EvalWriteStateDeposed used with no ResourceInstanceObject") + } -// Pulls together the common tasks of the EvalWriteState nodes. All the args -// are passed directly down from the EvalNode along with a `writer` function -// which is yielded the *ResourceState and is responsible for writing an -// InstanceState to the proper field in the ResourceState. -func writeInstanceToState( - ctx EvalContext, - resourceName string, - resourceType string, - provider string, - dependencies []string, - writerFn func(*ResourceState) error, -) (*InstanceState, error) { - state, lock := ctx.State() - if state == nil { - return nil, fmt.Errorf("cannot write state to nil state") - } - - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, create it. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - mod = state.AddModule(ctx.Path()) - } - - // Look for the resource state. - rs := mod.Resources[resourceName] - if rs == nil { - rs = &ResourceState{} - rs.init() - mod.Resources[resourceName] = rs - } - rs.Type = resourceType - rs.Dependencies = dependencies - rs.Provider = provider - - if err := writerFn(rs); err != nil { - return nil, err + absAddr := n.Addr.Absolute(ctx.Path()) + key := n.Key + state := ctx.State() + + if key == states.NotDeposed { + // should never happen + return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr) + } + + obj := *n.State + if obj == nil { + // No need to encode anything: we'll just write it directly. + state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr) + log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key) + return nil, nil + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + // Should never happen, unless our state object is nil + panic("EvalWriteStateDeposed used with no ProviderSchema object") + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // It shouldn't be possible to get this far in any real scenario + // without a schema, but we might end up here in contrived tests that + // fail to set up their world properly. 
+        return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr)
+    }
+    src, err := obj.Encode(schema.ImpliedType(), currentVersion)
+    if err != nil {
+        return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err)
     }
 
+    log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key)
+    state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr)
     return nil, nil
 }
 
-// EvalDeposeState is an EvalNode implementation that takes the primary
-// out of a state and makes it Deposed. This is done at the beginning of
-// create-before-destroy calls so that the create can create while preserving
-// the old state of the to-be-destroyed resource.
+// EvalDeposeState is an EvalNode implementation that moves the current object
+// for the given instance to instead be a deposed object, leaving the instance
+// with no current object.
+// This is used at the beginning of a create-before-destroy replace action so
+// that the create can create while preserving the old state of the
+// to-be-destroyed object.
 type EvalDeposeState struct {
-    Name string
+    Addr addrs.ResourceInstance
+
+    // ForceKey, if a value other than states.NotDeposed, will be used as the
+    // key for the newly-created deposed object that results from this action.
+    // If set to states.NotDeposed (the zero value), a new unique key will be
+    // allocated.
+    ForceKey states.DeposedKey
+
+    // OutputKey, if non-nil, will be written with the deposed object key that
+    // was generated for the object. This can then be passed to
+    // EvalMaybeRestoreDeposedObject.Key so it knows which deposed instance to
+    // restore.
+    OutputKey *states.DeposedKey
 }
 
 // TODO: test
 func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
-    state, lock := ctx.State()
-
-    // Get a read lock so we can access this instance
-    lock.RLock()
-    defer lock.RUnlock()
-
-    // Look for the module state. If we don't have one, then it doesn't matter.
-    mod := state.ModuleByPath(ctx.Path())
-    if mod == nil {
-        return nil, nil
-    }
-
-    // Look for the resource state. If we don't have one, then it is okay.
-    rs := mod.Resources[n.Name]
-    if rs == nil {
-        return nil, nil
+    absAddr := n.Addr.Absolute(ctx.Path())
+    state := ctx.State()
+
+    var key states.DeposedKey
+    if n.ForceKey == states.NotDeposed {
+        key = state.DeposeResourceInstanceObject(absAddr)
+    } else {
+        key = n.ForceKey
+        state.DeposeResourceInstanceObjectForceKey(absAddr, key)
     }
+    log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key)
 
-    // If we don't have a primary, we have nothing to depose
-    if rs.Primary == nil {
-        return nil, nil
+    if n.OutputKey != nil {
+        *n.OutputKey = key
     }
 
-    // Depose
-    rs.Deposed = append(rs.Deposed, rs.Primary)
-    rs.Primary = nil
-
     return nil, nil
 }
 
-// EvalUndeposeState is an EvalNode implementation that reads the
-// InstanceState for a specific resource out of the state.
-type EvalUndeposeState struct {
-    Name  string
-    State **InstanceState
+// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will
+// restore a particular deposed object of the specified resource instance
+// to be the "current" object if and only if the instance doesn't currently
+// have a current object.
+//
+// This is intended for use when the create leg of a create before destroy
+// fails with no partial new object: if we didn't take any action, the user
+// would be left in the unfortunate situation of having no current object
+// and the previously-working object now deposed. This EvalNode causes a
+// better outcome by restoring things to how they were before the replace
+// operation began.
+//
+// The create operation may have produced a partial result even though it
+// failed and it's important that we don't "forget" that state, so in that
+// situation the prior object remains deposed and the partial new object
+// remains the current object, allowing the situation to hopefully be
+// improved in a subsequent run.
+type EvalMaybeRestoreDeposedObject struct {
+    Addr addrs.ResourceInstance
+
+    // Key is a pointer to the deposed object key that should be restored
+    // back to being the current object, which must be non-nil.
+    Key *states.DeposedKey
 }
 
 // TODO: test
-func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
-    state, lock := ctx.State()
+func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) {
+    absAddr := n.Addr.Absolute(ctx.Path())
+    dk := *n.Key
+    state := ctx.State()
+
+    restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk)
+    if restored {
+        log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk)
+    } else {
+        log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk)
    }
 
-    // Get a read lock so we can access this instance
-    lock.RLock()
-    defer lock.RUnlock()
+    return nil, nil
+}
 
-    // Look for the module state. If we don't have one, then it doesn't matter.
-    mod := state.ModuleByPath(ctx.Path())
-    if mod == nil {
-        return nil, nil
-    }
+// EvalWriteResourceState is an EvalNode implementation that ensures that
+// a suitable resource-level state record is present in the state, if that's
+// required for the "each mode" of that resource.
+//
+// This is important primarily for the situation where count = 0, since this
+// eval is the only chance we get to set the resource "each mode" to list
+// in that case, allowing expression evaluation to see it as a zero-element
+// list rather than as not set at all.
+type EvalWriteResourceState struct {
+    Addr         addrs.Resource
+    Config       *configs.Resource
+    ProviderAddr addrs.AbsProviderConfig
+}
 
-    // Look for the resource state. If we don't have one, then it is okay.
-    rs := mod.Resources[n.Name]
-    if rs == nil {
-        return nil, nil
+// TODO: test
+func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) {
+    var diags tfdiags.Diagnostics
+    absAddr := n.Addr.Absolute(ctx.Path())
+    state := ctx.State()
+
+    count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
+    diags = diags.Append(countDiags)
+    if countDiags.HasErrors() {
+        return nil, diags.Err()
    }
 
-    // If we don't have any desposed resource, then we don't have anything to do
-    if len(rs.Deposed) == 0 {
-        return nil, nil
+    // Currently we only support NoEach and EachList, because for_each support
+    // is not fully wired up across Terraform. Once for_each support is added,
+    // we'll need to handle that here too, setting states.EachMap if the
+    // assigned expression is a map.
+    eachMode := states.NoEach
+    if count >= 0 { // -1 signals "count not set"
+        eachMode = states.EachList
    }
 
-    // Undepose
-    idx := len(rs.Deposed) - 1
-    rs.Primary = rs.Deposed[idx]
-    rs.Deposed[idx] = *n.State
+    // This method takes care of all of the business logic of updating this
+    // while ensuring that any existing instances are preserved, etc.
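// [Editor's note] The effect of the EachList marker is easiest to see when
// count = 0: the resource then evaluates as an empty list instead of as an
// undeclared object, so expressions like length(aws_instance.example) can
// still succeed. A hedged sketch of the selection above as a standalone
// helper (states.EachMode and its values are the real types used here; the
// helper itself is illustrative):
//
//	func eachModeForCount(count int) states.EachMode {
//		if count >= 0 { // -1 signals "count not set"
//			return states.EachList
//		}
//		return states.NoEach
//	}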
+ state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr) + + return nil, nil +} + +// EvalForgetResourceState is an EvalNode implementation that prunes out an +// empty resource-level state for a given resource address, or produces an +// error if it isn't empty after all. +// +// This should be the last action taken for a resource that has been removed +// from the configuration altogether, to clean up the leftover husk of the +// resource in the state after other EvalNodes have destroyed and removed +// all of the instances and instance objects beneath it. +type EvalForgetResourceState struct { + Addr addrs.Resource +} + +func (n *EvalForgetResourceState) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + pruned := state.RemoveResourceIfEmpty(absAddr) + if !pruned { + // If this produces an error, it indicates a bug elsewhere in Terraform + // -- probably missing graph nodes, graph edges, or + // incorrectly-implemented evaluation steps. + return nil, fmt.Errorf("orphan resource %s still has a non-empty state after apply; this is a bug in Terraform", absAddr) + } + log.Printf("[TRACE] EvalForgetResourceState: Pruned husk of %s from state", absAddr) return nil, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go new file mode 100644 index 0000000000..b7720d71bd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go @@ -0,0 +1,115 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// UpgradeResourceState will, if necessary, run the provider-defined upgrade +// logic against the given state object to make it compliant with the +// current schema version. This is a no-op if the given state object is +// already at the latest version. +// +// If any errors occur during upgrade, error diagnostics are returned. In that +// case it is not safe to proceed with using the original state object. +func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { + currentTy := currentSchema.ImpliedType() + + // If the state is currently in flatmap format and the current schema + // contains DynamicPseudoType attributes then we won't be able to convert + // it to JSON without the provider's help even if the schema version matches, + // since only the provider knows how to interpret the dynamic attribute + // value in flatmap format to convert it to JSON. + schemaHasDynamic := currentTy.HasDynamicTypes() + stateIsFlatmap := len(src.AttrsJSON) == 0 + forceProviderUpgrade := schemaHasDynamic && stateIsFlatmap + + if src.SchemaVersion == currentVersion && !forceProviderUpgrade { + // No upgrading required, then. + return src, nil + } + if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { + // We only do state upgrading for managed resources. 
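// [Editor's note] Data resources are re-read from their provider on every
// refresh, so their stored state never needs migrating; presumably that is
// why only managed resources continue past this point. The provider-side
// half of this contract, in the v0.12-era helper/schema SDK that this PR
// vendors, is declared roughly as in the following sketch (the resource
// name, field names, and upgrade logic are hypothetical):
//
//	func resourceExample() *schema.Resource {
//		return &schema.Resource{
//			SchemaVersion: 1,
//			StateUpgraders: []schema.StateUpgrader{{
//				Version: 0,
//				Type:    resourceExampleV0().CoreConfigSchema().ImpliedType(),
//				Upgrade: func(raw map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
//					raw["new_name"] = raw["old_name"] // hypothetical rename
//					delete(raw, "old_name")
//					return raw, nil
//				},
//			}},
//			// ... Schema, CRUD functions, etc.
//		}
//	}
//
// Core sends RawStateFlatmap or RawStateJSON (as below) and the SDK then
// walks the registered upgraders from the stored version up to SchemaVersion.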
+ return src, nil + } + + providerType := addr.Resource.Resource.DefaultProviderConfig().Type + if src.SchemaVersion > currentVersion { + log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource instance managed by newer provider version", + // This is not a very good error message, but we don't retain enough + // information in state to give good feedback on what provider + // version might be required here. :( + fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType), + )) + return nil, diags + } + + // If we get down here then we need to upgrade the state, with the + // provider's help. + // If this state was originally created by a version of Terraform prior to + // v0.12, this also includes translating from legacy flatmap to new-style + // representation, since only the provider has enough information to + // understand a flatmap built against an older schema. + log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) + + req := providers.UpgradeResourceStateRequest{ + TypeName: addr.Resource.Resource.Type, + + // TODO: The internal schema version representations are all using + // uint64 instead of int64, but unsigned integers aren't friendly + // to all protobuf target languages so in practice we use int64 + // on the wire. In future we will change all of our internal + // representations to int64 too. + Version: int64(src.SchemaVersion), + } + + if stateIsFlatmap { + req.RawStateFlatmap = src.AttrsFlat + } else { + req.RawStateJSON = src.AttrsJSON + } + + resp := provider.UpgradeResourceState(req) + diags := resp.Diagnostics + if diags.HasErrors() { + return nil, diags + } + + // After upgrading, the new value must conform to the current schema. When + // going over RPC this is actually already ensured by the + // marshaling/unmarshaling of the new value, but we'll check it here + // anyway for robustness, e.g. for in-process providers. + newValue := resp.UpgradedState + if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource state upgrade", + fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)), + )) + } + return nil, diags + } + + new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion)) + if err != nil { + // We already checked for type conformance above, so getting into this + // codepath should be rare and is probably a bug somewhere under CompleteUpgrade. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to encode result of resource state upgrade", + fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)), + )) + } + return new, diags +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go index 3e5a84ce65..0033e01ac2 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go @@ -2,126 +2,163 @@ package terraform import ( "fmt" - - "github.com/hashicorp/terraform/config" - "github.com/mitchellh/mapstructure" + "log" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" ) -// EvalValidateError is the error structure returned if there were -// validation errors. -type EvalValidateError struct { - Warnings []string - Errors []error -} - -func (e *EvalValidateError) Error() string { - return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors) -} - // EvalValidateCount is an EvalNode implementation that validates // the count of a resource. type EvalValidateCount struct { - Resource *config.Resource + Resource *configs.Resource } // TODO: test func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics var count int - var errs []error var err error - if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil { - errs = append(errs, fmt.Errorf( - "Failed to interpolate count: %s", err)) + + val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { goto RETURN } - - count, err = n.Resource.Count() - if err != nil { - // If we can't get the count during validation, then - // just replace it with the number 1. - c := n.Resource.RawCount.Config() - c[n.Resource.RawCount.Key] = "1" - count = 1 + if val.IsNull() || !val.IsKnown() { + goto RETURN } - err = nil - if count < 0 { - errs = append(errs, fmt.Errorf( - "Count is less than zero: %d", count)) + err = gocty.FromCtyValue(val, &count) + if err != nil { + // The EvaluateExpr call above already guaranteed us a number value, + // so if we end up here then we have something that is out of range + // for an int, and the error message will include a description of + // the valid range. 
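// [Editor's note] Concretely, the two ways gocty.FromCtyValue can fail here
// (the literal values are illustrative; cty and gocty are the packages
// already imported by this file):
//
//	var count int
//	err := gocty.FromCtyValue(cty.NumberFloatVal(1.5), &count)
//	// fails: 1.5 is a valid cty.Number but not a whole number
//	err = gocty.FromCtyValue(cty.MustParseNumberVal("1e99"), &count)
//	// fails: a whole number, but outside the range an int can represent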
+ rawVal := val.AsBigFloat() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count value", + Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err), + Subject: n.Resource.Count.Range().Ptr(), + }) + } else if count < 0 { + rawVal := val.AsBigFloat() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count value", + Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal), + Subject: n.Resource.Count.Range().Ptr(), + }) } RETURN: - if len(errs) != 0 { - err = &EvalValidateError{ - Errors: errs, - } - } - return nil, err + return nil, diags.NonFatalErr() } // EvalValidateProvider is an EvalNode implementation that validates -// the configuration of a resource. +// a provider configuration. type EvalValidateProvider struct { - Provider *ResourceProvider - Config **ResourceConfig + Addr addrs.ProviderConfig + Provider *providers.Interface + Config *configs.Provider } func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics provider := *n.Provider - config := *n.Config - warns, errs := provider.Validate(config) - if len(warns) == 0 && len(errs) == 0 { - return nil, nil + configBody := buildProviderConfig(ctx, n.Addr, n.Config) + + resp := provider.GetSchema() + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.NonFatalErr() } - return nil, &EvalValidateError{ - Warnings: warns, - Errors: errs, + configSchema := resp.Provider.Block + if configSchema == nil { + // Should never happen in real code, but often comes up in tests where + // mock schemas are being used that tend to be incomplete. + log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr) + configSchema = &configschema.Block{} } + + configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return nil, diags.NonFatalErr() + } + + req := providers.PrepareProviderConfigRequest{ + Config: configVal, + } + + validateResp := provider.PrepareProviderConfig(req) + diags = diags.Append(validateResp.Diagnostics) + + return nil, diags.NonFatalErr() } // EvalValidateProvisioner is an EvalNode implementation that validates -// the configuration of a resource. +// the configuration of a provisioner belonging to a resource. The provisioner +// config is expected to contain the merged connection configurations. type EvalValidateProvisioner struct { - Provisioner *ResourceProvisioner - Config **ResourceConfig - ConnConfig **ResourceConfig + ResourceAddr addrs.Resource + Provisioner *provisioners.Interface + Schema **configschema.Block + Config *configs.Provisioner + ResourceHasCount bool } func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { provisioner := *n.Provisioner config := *n.Config - var warns []string - var errs []error + schema := *n.Schema + + var diags tfdiags.Diagnostics { // Validate the provisioner's own config first - w, e := provisioner.Validate(config) - warns = append(warns, w...) - errs = append(errs, e...) - } - { - // Now validate the connection config, which might either be from - // the provisioner block itself or inherited from the resource's - // shared connection info. - w, e := n.validateConnConfig(*n.ConnConfig) - warns = append(warns, w...) - errs = append(errs, e...) 
- } + configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } - if len(warns) == 0 && len(errs) == 0 { - return nil, nil + if configVal == cty.NilVal { + // Should never happen for a well-behaved EvaluateBlock implementation + return nil, fmt.Errorf("EvaluateBlock returned nil value") + } + + req := provisioners.ValidateProvisionerConfigRequest{ + Config: configVal, + } + + resp := provisioner.ValidateProvisionerConfig(req) + diags = diags.Append(resp.Diagnostics) } - return nil, &EvalValidateError{ - Warnings: warns, - Errors: errs, + { + // Now validate the connection config, which contains the merged bodies + // of the resource and provisioner connection blocks. + connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr) + diags = diags.Append(connDiags) } + + return nil, diags.NonFatalErr() } -func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) { +func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics { // We can't comprehensively validate the connection config since its // final structure is decided by the communicator and we can't instantiate // that until we have a complete instance state. However, we *can* catch @@ -129,103 +166,379 @@ func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) // typos early rather than waiting until we actually try to run one of // the resource's provisioners. - type connConfigSuperset struct { - // All attribute types are interface{} here because at this point we - // may still have unresolved interpolation expressions, which will - // appear as strings regardless of the final goal type. + var diags tfdiags.Diagnostics - Type interface{} `mapstructure:"type"` - User interface{} `mapstructure:"user"` - Password interface{} `mapstructure:"password"` - Host interface{} `mapstructure:"host"` - Port interface{} `mapstructure:"port"` - Timeout interface{} `mapstructure:"timeout"` - ScriptPath interface{} `mapstructure:"script_path"` + if config == nil || config.Config == nil { + // No block to validate + return diags + } - // For type=ssh only (enforced in ssh communicator) - PrivateKey interface{} `mapstructure:"private_key"` - HostKey interface{} `mapstructure:"host_key"` - Agent interface{} `mapstructure:"agent"` - BastionHost interface{} `mapstructure:"bastion_host"` - BastionHostKey interface{} `mapstructure:"bastion_host_key"` - BastionPort interface{} `mapstructure:"bastion_port"` - BastionUser interface{} `mapstructure:"bastion_user"` - BastionPassword interface{} `mapstructure:"bastion_password"` - BastionPrivateKey interface{} `mapstructure:"bastion_private_key"` - AgentIdentity interface{} `mapstructure:"agent_identity"` + // We evaluate here just by evaluating the block and returning any + // diagnostics we get, since evaluation alone is enough to check for + // extraneous arguments and incorrectly-typed arguments. 
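// [Editor's note] A self-contained illustration of why evaluating against a
// schema is enough: the hcl2 decoder rejects arguments the schema doesn't
// declare. (Terraform derives such a decoder spec from a configschema.Block
// internally; the tiny spec and the misspelled "hosst" below are made up for
// the example.)
//
//	src := []byte(`hosst = "10.0.0.1"`)
//	f, _ := hclsyntax.ParseConfig(src, "conn.tf", hcl.Pos{Line: 1, Column: 1})
//	spec := hcldec.ObjectSpec{
//		"host": &hcldec.AttrSpec{Name: "host", Type: cty.String, Required: true},
//	}
//	_, diags := hcldec.Decode(f.Body, spec, nil)
//	// diags reports the unsupported "hosst" argument and the missing
//	// required "host" argument, each with a source range attached.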
+    _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema)
+    diags = diags.Append(configDiags)
 
-        // For type=winrm only (enforced in winrm communicator)
-        HTTPS    interface{} `mapstructure:"https"`
-        Insecure interface{} `mapstructure:"insecure"`
-        NTLM     interface{} `mapstructure:"use_ntlm"`
-        CACert   interface{} `mapstructure:"cacert"`
-    }
+    return diags
+}
 
-    var metadata mapstructure.Metadata
-    decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
-        Metadata: &metadata,
-        Result:   &connConfigSuperset{}, // result is disregarded; we only care about unused keys
-    })
-    if err != nil {
-        // should never happen
-        errs = append(errs, err)
-        return
-    }
+func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
+    keyData := EvalDataForNoInstanceKey
+    selfAddr := n.ResourceAddr.Instance(addrs.NoKey)
 
-    if err := decoder.Decode(connConfig.Config); err != nil {
-        errs = append(errs, err)
-        return
-    }
+    if n.ResourceHasCount {
+        // For a resource that has count, we allow count.index but don't
+        // know at this stage what it will return.
+        keyData = InstanceKeyEvalData{
+            CountIndex: cty.UnknownVal(cty.Number),
+        }
 
-    for _, attrName := range metadata.Unused {
-        errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName))
+        // "self" can't point to an unknown key, but we'll force it to be
+        // key 0 here, which should return an unknown value of the
+        // expected type since none of these elements are known at this
+        // point anyway.
+        selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0))
    }
-    return
+
+    return ctx.EvaluateBlock(body, schema, selfAddr, keyData)
+}
+
+// connectionBlockSupersetSchema is a schema representing the superset of all
+// possible arguments for "connection" blocks across all supported connection
+// types.
+//
+// This currently lives here because we've not yet updated our communicator
+// subsystem to be aware of schema itself. Once that is done, we can remove
+// this and use a type-specific schema from the communicator to validate
+// exactly what is expected for a given connection type.
+var connectionBlockSupersetSchema = &configschema.Block{
+    Attributes: map[string]*configschema.Attribute{
+        // NOTE: "type" is treated specially by the config loader, but it is
+        // also listed in this schema (just below) so that its value can be
+        // type-checked along with the other arguments.
+ + // Common attributes for both connection types + "host": { + Type: cty.String, + Required: true, + }, + "type": { + Type: cty.String, + Optional: true, + }, + "user": { + Type: cty.String, + Optional: true, + }, + "password": { + Type: cty.String, + Optional: true, + }, + "port": { + Type: cty.String, + Optional: true, + }, + "timeout": { + Type: cty.String, + Optional: true, + }, + "script_path": { + Type: cty.String, + Optional: true, + }, + + // For type=ssh only (enforced in ssh communicator) + "private_key": { + Type: cty.String, + Optional: true, + }, + "certificate": { + Type: cty.String, + Optional: true, + }, + "host_key": { + Type: cty.String, + Optional: true, + }, + "agent": { + Type: cty.Bool, + Optional: true, + }, + "agent_identity": { + Type: cty.String, + Optional: true, + }, + "bastion_host": { + Type: cty.String, + Optional: true, + }, + "bastion_host_key": { + Type: cty.String, + Optional: true, + }, + "bastion_port": { + Type: cty.Number, + Optional: true, + }, + "bastion_user": { + Type: cty.String, + Optional: true, + }, + "bastion_password": { + Type: cty.String, + Optional: true, + }, + "bastion_private_key": { + Type: cty.String, + Optional: true, + }, + + // For type=winrm only (enforced in winrm communicator) + "https": { + Type: cty.Bool, + Optional: true, + }, + "insecure": { + Type: cty.Bool, + Optional: true, + }, + "cacert": { + Type: cty.String, + Optional: true, + }, + "use_ntlm": { + Type: cty.Bool, + Optional: true, + }, + }, +} + +// connectionBlockSupersetSchema is a schema representing the superset of all +// possible arguments for "connection" blocks across all supported connection +// types. +// +// This currently lives here because we've not yet updated our communicator +// subsystem to be aware of schema itself. It's exported only for use in the +// configs/configupgrade package and should not be used from anywhere else. +// The caller may not modify any part of the returned schema data structure. +func ConnectionBlockSupersetSchema() *configschema.Block { + return connectionBlockSupersetSchema } // EvalValidateResource is an EvalNode implementation that validates // the configuration of a resource. type EvalValidateResource struct { - Provider *ResourceProvider - Config **ResourceConfig - ResourceName string - ResourceType string - ResourceMode config.ResourceMode + Addr addrs.Resource + Provider *providers.Interface + ProviderSchema **ProviderSchema + Config *configs.Resource // IgnoreWarnings means that warnings will not be passed through. This allows // "just-in-time" passes of validation to continue execution through warnings. IgnoreWarnings bool + + // ConfigVal, if non-nil, will be updated with the value resulting from + // evaluating the given configuration body. Since validation is performed + // very early, this value is likely to contain lots of unknown values, + // but its type will conform to the schema of the resource type associated + // with the resource instance being validated. 
+ ConfigVal *cty.Value } func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr) + } + + var diags tfdiags.Diagnostics provider := *n.Provider cfg := *n.Config - var warns []string - var errs []error + schema := *n.ProviderSchema + mode := cfg.Mode + + keyData := EvalDataForNoInstanceKey + if n.Config.Count != nil { + // If the config block has count, we'll evaluate with an unknown + // number as count.index so we can still type check even though + // we won't expand count until the plan phase. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // Basic type-checking of the count argument. More complete validation + // of this will happen when we DynamicExpand during the plan walk. + countDiags := n.validateCount(ctx, n.Config.Count) + diags = diags.Append(countDiags) + } + + for _, traversal := range n.Config.DependsOn { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if len(ref.Remaining) != 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid depends_on reference", + Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", + Subject: ref.Remaining.SourceRange().Ptr(), + }) + } + + // The ref must also refer to something that exists. To test that, + // we'll just eval it and count on the fact that our evaluator will + // detect references to non-existent objects. + if !diags.HasErrors() { + scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) + if scope != nil { // sometimes nil in tests, due to incomplete mocks + _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) + diags = diags.Append(refDiags) + } + } + } + // Provider entry point varies depending on resource mode, because // managed resources and data resources are two distinct concepts // in the provider abstraction. 
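// [Editor's note] Validation runs before count expansion, so the values
// flowing through here are full of unknowns; cty keeps them type-checkable
// regardless. An illustrative snippet (the values are examples):
//
//	idx := cty.UnknownVal(cty.Number)   // like count.index during validate
//	sum := idx.Add(cty.NumberIntVal(1)) // operations propagate unknowns
//	sum.IsKnown()                       // false
//	sum.Type().Equals(cty.Number)       // true: the type is still checked
//
// This is what lets the EvaluateBlock calls below return a value whose type
// conforms to the resource schema even when almost nothing is known yet.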
- switch n.ResourceMode { - case config.ManagedResourceMode: - warns, errs = provider.ValidateResource(n.ResourceType, cfg) - case config.DataResourceMode: - warns, errs = provider.ValidateDataSource(n.ResourceType, cfg) - } + switch mode { + case addrs.ManagedResourceMode: + schema, _ := schema.SchemaForResourceType(mode, cfg.Type) + if schema == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type", + Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type), + Subject: &cfg.TypeRange, + }) + return nil, diags.Err() + } + + configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return nil, diags.Err() + } + + if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks + for _, traversal := range cfg.Managed.IgnoreChanges { + moreDiags := schema.StaticValidateTraversal(traversal) + diags = diags.Append(moreDiags) + } + } + + req := providers.ValidateResourceTypeConfigRequest{ + TypeName: cfg.Type, + Config: configVal, + } + + resp := provider.ValidateResourceTypeConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) + + if n.ConfigVal != nil { + *n.ConfigVal = configVal + } + + case addrs.DataResourceMode: + schema, _ := schema.SchemaForResourceType(mode, cfg.Type) + if schema == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data source", + Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type), + Subject: &cfg.TypeRange, + }) + return nil, diags.Err() + } + + configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return nil, diags.Err() + } - // If the resource name doesn't match the name regular - // expression, show an error. - if !config.NameRegexp.Match([]byte(n.ResourceName)) { - errs = append(errs, fmt.Errorf( - "%s: resource name can only contain letters, numbers, "+ - "dashes, and underscores.", n.ResourceName)) + req := providers.ValidateDataSourceConfigRequest{ + TypeName: cfg.Type, + Config: configVal, + } + + resp := provider.ValidateDataSourceConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) } - if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 { + if n.IgnoreWarnings { + // If we _only_ have warnings then we'll return nil. + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } return nil, nil + } else { + // We'll return an error if there are any diagnostics at all, even if + // some of them are warnings. + return nil, diags.NonFatalErr() + } +} + +func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics { + if expr == nil { + return nil + } + + var diags tfdiags.Diagnostics + + countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) + diags = diags.Append(countDiags) + if diags.HasErrors() { + return diags + } + + if countVal.IsNull() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is null. 
An integer is required.`,
+            Subject:  expr.Range().Ptr(),
+        })
+        return diags
    }
 
-    return nil, &EvalValidateError{
-        Warnings: warns,
-        Errors:   errs,
+    var err error
+    countVal, err = convert.Convert(countVal, cty.Number)
+    if err != nil {
+        diags = diags.Append(&hcl.Diagnostic{
+            Severity: hcl.DiagError,
+            Summary:  "Invalid count argument",
+            Detail:   fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
+            Subject:  expr.Range().Ptr(),
+        })
+        return diags
    }
+
+    // If the value isn't known then that's the best we can do for now, but
+    // we'll check more thoroughly during the plan walk.
+    if !countVal.IsKnown() {
+        return diags
+    }
+
+    // If we _do_ know the value, then we can do a few more checks here.
+    var count int
+    err = gocty.FromCtyValue(countVal, &count)
+    if err != nil {
+        // Isn't a whole number, etc.
+        diags = diags.Append(&hcl.Diagnostic{
+            Severity: hcl.DiagError,
+            Summary:  "Invalid count argument",
+            Detail:   fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
+            Subject:  expr.Range().Ptr(),
+        })
+        return diags
+    }
+
+    if count < 0 {
+        diags = diags.Append(&hcl.Diagnostic{
+            Severity: hcl.DiagError,
+            Summary:  "Invalid count argument",
+            Detail:   `The given "count" argument value is unsuitable: count cannot be negative.`,
+            Subject:  expr.Range().Ptr(),
+        })
+        return diags
+    }
+
+    return diags
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
index ae4436a2ee..edd604fa77 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -3,72 +3,65 @@ package terraform
 import (
     "fmt"
 
-    "github.com/hashicorp/terraform/config"
+    "github.com/hashicorp/hcl2/hcl"
+
+    "github.com/hashicorp/terraform/addrs"
+    "github.com/hashicorp/terraform/configs/configschema"
+    "github.com/hashicorp/terraform/lang"
+    "github.com/hashicorp/terraform/tfdiags"
 )
 
-// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
-// a configuration doesn't contain a reference to the resource itself.
-//
-// This must be done prior to interpolating configuration in order to avoid
-// any infinite loop scenarios.
-type EvalValidateResourceSelfRef struct {
-    Addr   **ResourceAddress
-    Config **config.RawConfig
+// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that
+// expressions within a particular referenceable block do not reference that
+// same block.
+type EvalValidateSelfRef struct {
+    Addr           addrs.Referenceable
+    Config         hcl.Body
+    ProviderSchema **ProviderSchema
 }
 
-func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
-    addr := *n.Addr
-    conf := *n.Config
-
-    // Go through the variables and find self references
-    var errs []error
-    for k, raw := range conf.Variables {
-        rv, ok := raw.(*config.ResourceVariable)
-        if !ok {
-            continue
-        }
-
-        // Build an address from the variable
-        varAddr := &ResourceAddress{
-            Path:         addr.Path,
-            Mode:         rv.Mode,
-            Type:         rv.Type,
-            Name:         rv.Name,
-            Index:        rv.Index,
-            InstanceType: TypePrimary,
-        }
+func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) {
+    var diags tfdiags.Diagnostics
+    addr := n.Addr
 
-        // If the variable access is a multi-access (*), then we just
-        // match the index so that we'll match our own addr if everything
-        // else matches.
- if rv.Multi && rv.Index == -1 { - varAddr.Index = addr.Index - } + addrStrs := make([]string, 0, 1) + addrStrs = append(addrStrs, addr.String()) + switch tAddr := addr.(type) { + case addrs.ResourceInstance: + // A resource instance may not refer to its containing resource either. + addrStrs = append(addrStrs, tAddr.ContainingResource().String()) + } - // This is a weird thing where ResourceAddres has index "-1" when - // index isn't set at all. This means index "0" for resource access. - // So, if we have this scenario, just set our varAddr to -1 so it - // matches. - if addr.Index == -1 && varAddr.Index == 0 { - varAddr.Index = -1 - } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr) + } - // If the addresses match, then this is a self reference - if varAddr.Equals(addr) && varAddr.Index == addr.Index { - errs = append(errs, fmt.Errorf( - "%s: self reference not allowed: %q", - addr, k)) - } + providerSchema := *n.ProviderSchema + var schema *configschema.Block + switch tAddr := addr.(type) { + case addrs.Resource: + schema, _ = providerSchema.SchemaForResourceAddr(tAddr) + case addrs.ResourceInstance: + schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource()) } - // If no errors, no errors! - if len(errs) == 0 { - return nil, nil + if schema == nil { + return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr) } - // Wrap the errors in the proper wrapper so we can handle validation - // formatting properly upstream. - return nil, &EvalValidateError{ - Errors: errs, + refs, _ := lang.ReferencesInBlock(n.Config, schema) + for _, ref := range refs { + for _, addrStr := range addrStrs { + if ref.Subject.String() == addrStr { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Self-referential block", + Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } } + + return nil, diags.NonFatalErr() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go index e39a33c2a9..68adf764df 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go @@ -4,12 +4,17 @@ import ( "fmt" "log" "reflect" - "strconv" "strings" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/helper/hilmapstructure" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" ) // EvalTypeCheckVariable is an EvalNode which ensures that the variable @@ -93,166 +98,88 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) { return nil, nil } -// EvalSetVariables is an EvalNode implementation that sets the variables -// explicitly for interpolation later. -type EvalSetVariables struct { - Module *string - Variables map[string]interface{} +// EvalSetModuleCallArguments is an EvalNode implementation that sets values +// for arguments of a child module call, for later retrieval during +// expression evaluation. 
+type EvalSetModuleCallArguments struct { + Module addrs.ModuleCallInstance + Values map[string]cty.Value } // TODO: test -func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) { - ctx.SetVariables(*n.Module, n.Variables) +func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) { + ctx.SetModuleCallArguments(n.Module, n.Values) return nil, nil } -// EvalVariableBlock is an EvalNode implementation that evaluates the -// given configuration, and uses the final values as a way to set the -// mapping. -type EvalVariableBlock struct { - Config **ResourceConfig - VariableValues map[string]interface{} +// EvalModuleCallArgument is an EvalNode implementation that produces the value +// for a particular variable as will be used by a child module instance. +// +// The result is written into the map given in Values, with its key +// set to the local name of the variable, disregarding the module instance +// address. Any existing values in that map are deleted first. This weird +// interface is a result of trying to be convenient for use with +// EvalContext.SetModuleCallArguments, which expects a map to merge in with +// any existing arguments. +type EvalModuleCallArgument struct { + Addr addrs.InputVariable + Config *configs.Variable + Expr hcl.Expression + + // If this flag is set, any diagnostics are discarded and this operation + // will always succeed, though may produce an unknown value in the + // event of an error. + IgnoreDiagnostics bool + + Values map[string]cty.Value } -func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) { +func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) { // Clear out the existing mapping - for k, _ := range n.VariableValues { - delete(n.VariableValues, k) - } - - // Get our configuration - rc := *n.Config - for k, v := range rc.Config { - vKind := reflect.ValueOf(v).Type().Kind() - - switch vKind { - case reflect.Slice: - var vSlice []interface{} - if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil { - n.VariableValues[k] = vSlice - continue - } - case reflect.Map: - var vMap map[string]interface{} - if err := hilmapstructure.WeakDecode(v, &vMap); err == nil { - n.VariableValues[k] = vMap - continue - } - default: - var vString string - if err := hilmapstructure.WeakDecode(v, &vString); err == nil { - n.VariableValues[k] = vString - continue - } - } - - return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k) - } - - for _, path := range rc.ComputedKeys { - log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path) - err := n.setUnknownVariableValueForPath(path) - if err != nil { - return nil, err - } + for k := range n.Values { + delete(n.Values, k) } - return nil, nil -} - -func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error { - pathComponents := strings.Split(path, ".") - - if len(pathComponents) < 1 { - return fmt.Errorf("No path comoponents in %s", path) + wantType := n.Config.Type + name := n.Addr.Name + expr := n.Expr + + if expr == nil { + // Should never happen, but we'll bail out early here rather than + // crash in case it does. We set no value at all in this case, + // making a subsequent call to EvalContext.SetModuleCallArguments + // a no-op. 
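// [Editor's note] The conversion this function performs further down
// (convert.Convert(val, wantType)) is what lets the error name the child
// module variable instead of surfacing deep inside the module. The same API
// in isolation (example values):
//
//	v, err := convert.Convert(cty.StringVal("5"), cty.Number)
//	// err == nil; v is the number 5: safe conversions happen automatically
//	v, err = convert.Convert(cty.StringVal("not-a-number"), cty.Number)
//	// err != nil; the message describes why the value can't convert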
+ log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String()) + return nil, nil } - if len(pathComponents) == 1 { - // Special case the "top level" since we know the type - if _, ok := n.VariableValues[pathComponents[0]]; !ok { - n.VariableValues[pathComponents[0]] = config.UnknownVariableValue - } - return nil + val, diags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) + + // We intentionally passed DynamicPseudoType to EvaluateExpr above because + // now we can do our own local type conversion and produce an error message + // with better context if it fails. + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid value for module argument", + Detail: fmt.Sprintf( + "The given value is not suitable for child module variable %q defined at %s: %s.", + name, n.Config.DeclRange.String(), convErr, + ), + Subject: expr.Range().Ptr(), + }) + // We'll return a placeholder unknown value to avoid producing + // redundant downstream errors. + val = cty.UnknownVal(wantType) } - // Otherwise find the correct point in the tree and then set to unknown - var current interface{} = n.VariableValues[pathComponents[0]] - for i := 1; i < len(pathComponents); i++ { - switch tCurrent := current.(type) { - case []interface{}: - index, err := strconv.Atoi(pathComponents[i]) - if err != nil { - return fmt.Errorf("Cannot convert %s to slice index in path %s", - pathComponents[i], path) - } - current = tCurrent[index] - case []map[string]interface{}: - index, err := strconv.Atoi(pathComponents[i]) - if err != nil { - return fmt.Errorf("Cannot convert %s to slice index in path %s", - pathComponents[i], path) - } - current = tCurrent[index] - case map[string]interface{}: - if val, hasVal := tCurrent[pathComponents[i]]; hasVal { - current = val - continue - } - - tCurrent[pathComponents[i]] = config.UnknownVariableValue - break - } + n.Values[name] = val + if n.IgnoreDiagnostics { + return nil, nil } - - return nil -} - -// EvalCoerceMapVariable is an EvalNode implementation that recognizes a -// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a -// bare map literal is indistinguishable from a list of maps w/ one element. -// -// We take all the same inputs as EvalTypeCheckVariable above, since we need -// both the target type and the proposed value in order to properly coerce. -type EvalCoerceMapVariable struct { - Variables map[string]interface{} - ModulePath []string - ModuleTree *module.Tree -} - -// Eval implements the EvalNode interface. See EvalCoerceMapVariable for -// details. 
-func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) { - currentTree := n.ModuleTree - for _, pathComponent := range n.ModulePath[1:] { - currentTree = currentTree.Children()[pathComponent] - } - targetConfig := currentTree.Config() - - prototypes := make(map[string]config.VariableType) - for _, variable := range targetConfig.Variables { - prototypes[variable.Name] = variable.Type() - } - - for name, declaredType := range prototypes { - if declaredType != config.VariableTypeMap { - continue - } - - proposedValue, ok := n.Variables[name] - if !ok { - continue - } - - if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 { - if m, ok := list[0].(map[string]interface{}); ok { - log.Printf("[DEBUG] EvalCoerceMapVariable: "+ - "Coercing single element list into map: %#v", m) - n.Variables[name] = m - } - } - } - - return nil, nil + return nil, diags.ErrWithWarnings() } // hclTypeName returns the name of the type that would represent this value in diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go index 0c3da48f0d..6b4df67aaf 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go @@ -1,48 +1,34 @@ package terraform import ( - "strings" - - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/providers" ) // ProviderEvalTree returns the evaluation tree for initializing and // configuring providers. -func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) EvalNode { - var provider ResourceProvider - var resourceConfig *ResourceConfig +func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode { + var provider providers.Interface - typeName := strings.SplitN(n.NameValue, ".", 2)[0] + addr := n.Addr + relAddr := addr.ProviderConfig seq := make([]EvalNode, 0, 5) seq = append(seq, &EvalInitProvider{ - TypeName: typeName, - Name: n.Name(), + TypeName: relAddr.Type, + Addr: addr.ProviderConfig, }) // Input stuff seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkInput, walkImport}, + Ops: []walkOperation{walkImport}, Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.Name(), + Addr: addr, Output: &provider, }, - &EvalInterpolateProvider{ - Config: config, - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n.NameValue, - Config: &resourceConfig, - Output: &resourceConfig, - }, - &EvalInputProvider{ - Name: n.NameValue, - Provider: &provider, - Config: &resourceConfig, - }, }, }, }) @@ -52,21 +38,13 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.Name(), + Addr: addr, Output: &provider, }, - &EvalInterpolateProvider{ - Config: config, - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n.NameValue, - Config: &resourceConfig, - Output: &resourceConfig, - }, &EvalValidateProvider{ + Addr: relAddr, Provider: &provider, - Config: &resourceConfig, + Config: config, }, }, }, @@ -78,18 +56,9 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.Name(), + Addr: addr, Output: &provider, }, - &EvalInterpolateProvider{ - Config: config, - Output: &resourceConfig, - }, 
- &EvalBuildProviderConfig{ - Provider: n.NameValue, - Config: &resourceConfig, - Output: &resourceConfig, - }, }, }, }) @@ -101,8 +70,9 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E Node: &EvalSequence{ Nodes: []EvalNode{ &EvalConfigProvider{ - Provider: n.Name(), - Config: &resourceConfig, + Addr: relAddr, + Provider: &provider, + Config: config, }, }, }, @@ -113,6 +83,6 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E // CloseProviderEvalTree returns the evaluation tree for closing // provider connections that aren't needed anymore. -func CloseProviderEvalTree(n string) EvalNode { - return &EvalCloseProvider{Name: n} +func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode { + return &EvalCloseProvider{Addr: addr.ProviderConfig} } diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go new file mode 100644 index 0000000000..ab65d475b1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go @@ -0,0 +1,933 @@ +package terraform + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strconv" + "sync" + + "github.com/agext/levenshtein" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// Evaluator provides the necessary contextual data for evaluating expressions +// for a particular walk operation. +type Evaluator struct { + // Operation defines what type of operation this evaluator is being used + // for. + Operation walkOperation + + // Meta is contextual metadata about the current operation. + Meta *ContextMeta + + // Config is the root node in the configuration tree. + Config *configs.Config + + // VariableValues is a map from variable names to their associated values, + // within the module indicated by ModulePath. VariableValues is modified + // concurrently, and so it must be accessed only while holding + // VariableValuesLock. + // + // The first map level is string representations of addr.ModuleInstance + // values, while the second level is variable names. + VariableValues map[string]map[string]cty.Value + VariableValuesLock *sync.Mutex + + // Schemas is a repository of all of the schemas we should need to + // evaluate expressions. This must be constructed by the caller to + // include schemas for all of the providers, resource types, data sources + // and provisioners used by the given configuration and state. + // + // This must not be mutated during evaluation. + Schemas *Schemas + + // State is the current state, embedded in a wrapper that ensures that + // it can be safely accessed and modified concurrently. + State *states.SyncState + + // Changes is the set of proposed changes, embedded in a wrapper that + // ensures they can be safely accessed and modified concurrently. + Changes *plans.ChangesSync +} + +// Scope creates an evaluation scope for the given module path and optional +// resource. +// +// If the "self" argument is nil then the "self" object is not available +// in evaluated expressions. Otherwise, it behaves as an alias for the given +// address. 
+func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope { + return &lang.Scope{ + Data: data, + SelfAddr: self, + PureOnly: e.Operation != walkApply && e.Operation != walkDestroy, + BaseDir: ".", // Always current working directory for now. + } +} + +// evaluationStateData is an implementation of lang.Data that resolves +// references primarily (but not exclusively) using information from a State. +type evaluationStateData struct { + Evaluator *Evaluator + + // ModulePath is the path through the dynamic module tree to the module + // that references will be resolved relative to. + ModulePath addrs.ModuleInstance + + // InstanceKeyData describes the values, if any, that are accessible due + // to repetition of a containing object using "count" or "for_each" + // arguments. (It is _not_ used for the for_each inside "dynamic" blocks, + // since the user specifies in that case which variable name to locally + // shadow.) + InstanceKeyData InstanceKeyEvalData + + // Operation records the type of walk the evaluationStateData is being used + // for. + Operation walkOperation +} + +// InstanceKeyEvalData is used during evaluation to specify which values, +// if any, should be produced for count.index, each.key, and each.value. +type InstanceKeyEvalData struct { + // CountIndex is the value for count.index, or cty.NilVal if evaluating + // in a context where the "count" argument is not active. + // + // For correct operation, this should always be of type cty.Number if not + // nil. + CountIndex cty.Value + + // EachKey and EachValue are the values for each.key and each.value + // respectively, or cty.NilVal if evaluating in a context where the + // "for_each" argument is not active. These must either both be set + // or neither set. + // + // For correct operation, EachKey must always be either of type cty.String + // or cty.Number if not nil. + EachKey, EachValue cty.Value +} + +// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for +// evaluating in a context that has the given instance key. +func EvalDataForInstanceKey(key addrs.InstanceKey) InstanceKeyEvalData { + // At the moment we don't actually implement for_each, so we only + // ever populate CountIndex. + // (When we implement for_each later we may need to reorganize this some, + // so that we can resolve the ambiguity that an int key may either be + // a count.index or an each.key where for_each is over a list.) + + var countIdx cty.Value + if intKey, ok := key.(addrs.IntKey); ok { + countIdx = cty.NumberIntVal(int64(intKey)) + } + + return InstanceKeyEvalData{ + CountIndex: countIdx, + } +} + +// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance +// key values at all, suitable for use in contexts where no keyed instance +// is relevant. 
+var EvalDataForNoInstanceKey = InstanceKeyEvalData{} + +// evaluationStateData must implement lang.Data +var _ lang.Data = (*evaluationStateData)(nil) + +func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "index": + idxVal := d.InstanceKeyData.CountIndex + if idxVal == cty.NilVal { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to "count" in non-counted context`, + Detail: fmt.Sprintf(`The "count" object can be used only in "resource" and "data" blocks, and only when the "count" argument is set.`), + Subject: rng.ToHCL().Ptr(), + }) + return cty.UnknownVal(cty.Number), diags + } + return idxVal, diags + + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "count" attribute`, + Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // First we'll make sure the requested value is declared in configuration, + // so we can produce a nice message if not. + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath)) + } + + config := moduleConfig.Module.Variables[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Variables { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } else { + suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared input variable`, + Detail: fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + wantType := cty.DynamicPseudoType + if config.Type != cty.NilType { + wantType = config.Type + } + + d.Evaluator.VariableValuesLock.Lock() + defer d.Evaluator.VariableValuesLock.Unlock() + + // During the validate walk, input variables are always unknown so + // that we are validating the configuration for all possible input values + // rather than for a specific set. Checking against a specific set of + // input values then happens during the plan walk. + // + // This is important because otherwise the validation walk will tend to be + // overly strict, requiring expressions throughout the configuration to + // be complicated to accommodate all possible inputs, whereas returning + // unknown here allows for simpler patterns like using input values as + // guards to broadly enable/disable resources, avoid processing things + // that are disabled, etc.
Terraform's static validation leans towards + // being liberal in what it accepts because the subsequent plan walk has + // more information available and so can be more conservative. + if d.Operation == walkValidate { + return cty.UnknownVal(wantType), diags + } + + moduleAddrStr := d.ModulePath.String() + vals := d.Evaluator.VariableValues[moduleAddrStr] + if vals == nil { + return cty.UnknownVal(wantType), diags + } + + val, isSet := vals[addr.Name] + if !isSet { + if config.Default != cty.NilVal { + return config.Default, diags + } + return cty.UnknownVal(wantType), diags + } + + var err error + val, err = convert.Convert(val, wantType) + if err != nil { + // We should never get here because this problem should've been caught + // during earlier validation, but we'll do something reasonable anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Incorrect variable type`, + Detail: fmt.Sprintf(`The resolved value of variable %q is not appropriate: %s.`, addr.Name, err), + Subject: &config.DeclRange, + }) + // Stub out our return value so that the semantic checker doesn't + // produce redundant downstream errors. + val = cty.UnknownVal(wantType) + } + + return val, diags +} + +func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // First we'll make sure the requested value is declared in configuration, + // so we can produce a nice message if not. + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath)) + } + + config := moduleConfig.Module.Locals[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Locals { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared local value`, + Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath)) + if val == cty.NilVal { + // Not evaluated yet? + val = cty.DynamicVal + } + + return val, diags +} + +func (d *evaluationStateData) GetModuleInstance(addr addrs.ModuleCallInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Output results live in the module that declares them, which is one of + // the child module instances of our current module path. + moduleAddr := addr.ModuleInstance(d.ModulePath) + + // We'll consult the configuration to see what output names we are + // expecting, so we can ensure the resulting object is of the expected + // type even if our data is incomplete for some reason. + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // should never happen, since this should've been caught during + // static validation. 
+ panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr)) + } + outputConfigs := moduleConfig.Module.Outputs + + vals := map[string]cty.Value{} + for n := range outputConfigs { + addr := addrs.OutputValue{Name: n}.Absolute(moduleAddr) + + // If a pending change is present in our current changeset then its value + // takes priority over what's in state. (It will usually be the same but + // will differ if the new value is unknown during planning.) + if changeSrc := d.Evaluator.Changes.GetOutputChange(addr); changeSrc != nil { + change, err := changeSrc.Decode() + if err != nil { + // This should happen only if someone has tampered with a plan + // file, so we won't bother with a pretty error for it. + diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err)) + vals[n] = cty.DynamicVal + continue + } + // We care only about the "after" value, which is the value this output + // will take on after the plan is applied. + vals[n] = change.After + } else { + os := d.Evaluator.State.OutputValue(addr) + if os == nil { + // Not evaluated yet? + vals[n] = cty.DynamicVal + continue + } + vals[n] = os.Value + } + } + return cty.ObjectVal(vals), diags +} + +func (d *evaluationStateData) GetModuleInstanceOutput(addr addrs.ModuleCallOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Output results live in the module that declares them, which is one of + // the child module instances of our current module path. + absAddr := addr.AbsOutputValue(d.ModulePath) + moduleAddr := absAddr.Module + + // First we'll consult the configuration to see if an output of this + // name is declared at all. + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // this doesn't happen in normal circumstances due to our validation + // pass, but it can turn up in some unusual situations, like in the + // "terraform console" repl where arbitrary expressions can be + // evaluated. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared module`, + Detail: fmt.Sprintf(`The configuration contains no %s.`, moduleAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + config := moduleConfig.Module.Outputs[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Outputs { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared output value`, + Detail: fmt.Sprintf(`An output value with the name %q has not been declared in %s.%s`, addr.Name, moduleDisplayAddr(moduleAddr), suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + // If a pending change is present in our current changeset then its value + // takes priority over what's in state. (It will usually be the same but + // will differ if the new value is unknown during planning.) + if changeSrc := d.Evaluator.Changes.GetOutputChange(absAddr); changeSrc != nil { + change, err := changeSrc.Decode() + if err != nil { + // This should happen only if someone has tampered with a plan + // file, so we won't bother with a pretty error for it. 
+ diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", absAddr, err)) + return cty.DynamicVal, diags + } + // We care only about the "after" value, which is the value this output + // will take on after the plan is applied. + return change.After, diags + } + + os := d.Evaluator.State.OutputValue(absAddr) + if os == nil { + // Not evaluated yet? + return cty.DynamicVal, diags + } + + return os.Value, diags +} + +func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "cwd": + wd, err := os.Getwd() + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Failed to get working directory`, + Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + return cty.StringVal(filepath.ToSlash(wd)), diags + + case "module": + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath)) + } + sourceDir := moduleConfig.Module.SourceDir + return cty.StringVal(filepath.ToSlash(sourceDir)), diags + + case "root": + sourceDir := d.Evaluator.Config.Module.SourceDir + return cty.StringVal(filepath.ToSlash(sourceDir)), diags + + default: + suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"}) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "path" attribute`, + Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (d *evaluationStateData) GetResourceInstance(addr addrs.ResourceInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Although we are giving a ResourceInstance address here, if it has + // a key of addrs.NoKey then it might actually be a request for all of + // the instances of a particular resource. The reference resolver can't + // resolve the ambiguity itself, so we must do it in here. + + // First we'll consult the configuration to see if a resource of this + // name is declared at all. + moduleAddr := d.ModulePath + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr)) + } + + config := moduleConfig.Module.ResourceByAddr(addr.ContainingResource()) + if config == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared resource`, + Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Resource.Type, addr.Resource.Name, moduleDisplayAddr(moduleAddr)), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + // First we'll find the state for the resource as a whole, and decide + // from there whether we're going to interpret the given address as a + // resource or a resource instance address.
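+ // As an editorial example: given resource "aws_instance" "foo" with
+ // count = 2, the reference aws_instance.foo[0] selects a single instance
+ // object, while aws_instance.foo (no key) refers to the whole resource
+ // and yields a tuple of both instance objects.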
+ rs := d.Evaluator.State.Resource(addr.ContainingResource().Absolute(d.ModulePath)) + + if rs == nil { + schema := d.getResourceSchema(addr.ContainingResource(), config.ProviderConfigAddr().Absolute(d.ModulePath)) + + // If it doesn't exist at all then we can't reliably determine whether + // single-instance or whole-resource interpretation was intended, but + // we can decide this partially... + if addr.Key != addrs.NoKey { + // If there's an instance key then the user must be intending + // single-instance interpretation, and so we can return a + // properly-typed unknown value to help with type checking. + return cty.UnknownVal(schema.ImpliedType()), diags + } + + // otherwise we must return DynamicVal so that both interpretations + // can proceed without generating errors, and we'll deal with this + // in a later step where more information is gathered. + // (In practice we should only end up here during the validate walk, + // since later walks should have at least partial states populated + // for all resources in the configuration.) + return cty.DynamicVal, diags + } + + // Break out early during validation, because the resource may not be expanded + // yet and indexed references may show up as invalid. + if d.Operation == walkValidate { + return cty.DynamicVal, diags + } + + schema := d.getResourceSchema(addr.ContainingResource(), rs.ProviderConfig) + + // If we are able to automatically convert to the "right" type of instance + // key for this each mode then we'll do so, to match with how we generally + // treat values elsewhere in the language. This allows code below to + // assume that any possible conversions have already been dealt with and + // just worry about validation. + key := d.coerceInstanceKey(addr.Key, rs.EachMode) + + multi := false + + switch rs.EachMode { + case states.NoEach: + if key != addrs.NoKey { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource index", + Detail: fmt.Sprintf("Resource %s does not have either \"count\" or \"for_each\" set, so it cannot be indexed.", addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + case states.EachList: + multi = key == addrs.NoKey + if _, ok := addr.Key.(addrs.IntKey); !multi && !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource index", + Detail: fmt.Sprintf("Resource %s must be indexed with a number value.", addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + case states.EachMap: + multi = key == addrs.NoKey + if _, ok := addr.Key.(addrs.StringKey); !multi && !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource index", + Detail: fmt.Sprintf("Resource %s must be indexed with a string value.", addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + } + + if !multi { + log.Printf("[TRACE] GetResourceInstance: %s is a single instance", addr) + is := rs.Instance(key) + if is == nil { + return cty.UnknownVal(schema.ImpliedType()), diags + } + return d.getResourceInstanceSingle(addr, rng, is, config, rs.ProviderConfig) + } + + log.Printf("[TRACE] GetResourceInstance: %s has multiple keyed instances", addr) + return d.getResourceInstancesAll(addr.ContainingResource(), rng, config, rs, rs.ProviderConfig) +} + +func (d *evaluationStateData) getResourceInstanceSingle(addr addrs.ResourceInstance, rng tfdiags.SourceRange, is *states.ResourceInstance, config
*configs.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + schema := d.getResourceSchema(addr.ContainingResource(), providerAddr) + if schema == nil { + // This shouldn't happen, since validation before we get here should've + // taken care of it, but we'll show a reasonable error message anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource type schema`, + Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + ty := schema.ImpliedType() + if is == nil || is.Current == nil { + // Assume we're dealing with an instance that hasn't been created yet. + return cty.UnknownVal(ty), diags + } + + if is.Current.Status == states.ObjectPlanned { + // If there's a pending change for this instance in our plan, we'll prefer + // that. This is important because the state can't represent unknown values + // and so its data is inaccurate when changes are pending. + if change := d.Evaluator.Changes.GetResourceInstanceChange(addr.Absolute(d.ModulePath), states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", addr.Absolute(d.ModulePath), err), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + return val, diags + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", addr), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", addr.Absolute(d.ModulePath), err), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + + return ios.Value, diags +} + +func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng tfdiags.SourceRange, config *configs.Resource, rs *states.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + schema := d.getResourceSchema(addr, providerAddr) + if schema == nil { + // This shouldn't happen, since validation before we get here should've + // taken care of it, but we'll show a reasonable error message anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource type schema`, + Detail: fmt.Sprintf("No schema is available for %s in %s. 
This is a bug in Terraform and should be reported.", addr, providerAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + switch rs.EachMode { + + case states.EachList: + // We need to infer the length of our resulting tuple by searching + // for the max IntKey in our instances map. + length := 0 + for k := range rs.Instances { + if ik, ok := k.(addrs.IntKey); ok { + if int(ik) >= length { + length = int(ik) + 1 + } + } + } + + vals := make([]cty.Value, length) + for i := 0; i < length; i++ { + ty := schema.ImpliedType() + key := addrs.IntKey(i) + is, exists := rs.Instances[key] + if exists { + instAddr := addr.Instance(key).Absolute(d.ModulePath) + + // Prefer pending value in plan if present. See getResourceInstanceSingle + // comment for the rationale. + if is.Current.Status == states.ObjectPlanned { + if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[i] = val + continue + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), + Subject: &config.DeclRange, + }) + continue + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[i] = ios.Value + } else { + // There shouldn't normally be "gaps" in our list but we'll + // allow it under the assumption that we're in a weird situation + // where e.g. someone has run "terraform state mv" to reorder + // a list and left a hole behind. + vals[i] = cty.UnknownVal(schema.ImpliedType()) + } + } + + // We use a tuple rather than a list here because resource schemas may + // include dynamically-typed attributes, which will then cause each + // instance to potentially have a different runtime type even though + // they all conform to the static schema. + return cty.TupleVal(vals), diags + + case states.EachMap: + ty := schema.ImpliedType() + vals := make(map[string]cty.Value, len(rs.Instances)) + for k, is := range rs.Instances { + if sk, ok := k.(addrs.StringKey); ok { + instAddr := addr.Instance(k).Absolute(d.ModulePath) + + // Prefer pending value in plan if present. See getResourceInstanceSingle + // comment for the rationale.
+ if is.Current.Status == states.ObjectPlanned { + if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[string(sk)] = val + continue + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), + Subject: &config.DeclRange, + }) + continue + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[string(sk)] = ios.Value + } + } + + // We use an object rather than a map here because resource schemas may + // include dynamically-typed attributes, which will then cause each + // instance to potentially have a different runtime type even though + // they all conform to the static schema. + return cty.ObjectVal(vals), diags + + default: + // Should never happen since caller should deal with other modes + panic(fmt.Sprintf("unsupported EachMode %s", rs.EachMode)) + } +} + +func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.AbsProviderConfig) *configschema.Block { + providerType := providerAddr.ProviderConfig.Type + schemas := d.Evaluator.Schemas + schema, _ := schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) + return schema +} + +// coerceInstanceKey attempts to convert the given key to the type expected +// for the given EachMode. +// +// If the key is already of the correct type or if it cannot be converted then +// it is returned verbatim. If conversion is required and possible, the +// converted value is returned. Callers should not try to determine if +// conversion was possible, should instead just check if the result is of +// the expected type. 
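+//
+// Editorial illustration of the conversions implemented below:
+//
+//	d.coerceInstanceKey(addrs.StringKey("0"), states.EachList) // => addrs.IntKey(0)
+//	d.coerceInstanceKey(addrs.IntKey(3), states.EachMap)       // => addrs.StringKey("3")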
+func (d *evaluationStateData) coerceInstanceKey(key addrs.InstanceKey, mode states.EachMode) addrs.InstanceKey { + if key == addrs.NoKey { + // An absent key can't be converted + return key + } + + switch mode { + case states.NoEach: + // No conversions possible at all + return key + case states.EachMap: + if intKey, isInt := key.(addrs.IntKey); isInt { + return addrs.StringKey(strconv.Itoa(int(intKey))) + } + return key + case states.EachList: + if strKey, isStr := key.(addrs.StringKey); isStr { + i, err := strconv.Atoi(string(strKey)) + if err != nil { + return key + } + return addrs.IntKey(i) + } + return key + default: + return key + } +} + +func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "workspace": + workspaceName := d.Evaluator.Meta.Env + return cty.StringVal(workspaceName), diags + + case "env": + // Prior to Terraform 0.12 there was an attribute "env", which was + // an alias name for "workspace". This was deprecated and is now + // removed. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "terraform" attribute`, + Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. The "state environment" concept was renamed to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`, + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "terraform" attribute`, + Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} + +// moduleDisplayAddr returns a string describing the given module instance +// address that is appropriate for returning to users in situations where the +// root module is possible. Specifically, it returns "the root module" if the +// root module instance is given, or a string representation of the module +// address otherwise.
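+//
+// For example (editorial note; childModule stands for any non-root
+// addrs.ModuleInstance value):
+//
+//	moduleDisplayAddr(addrs.RootModuleInstance) // => "the root module"
+//	moduleDisplayAddr(childModule)              // => e.g. "module.child"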
+func moduleDisplayAddr(addr addrs.ModuleInstance) string { + switch { + case addr.IsRoot(): + return "the root module" + default: + return addr.String() + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go new file mode 100644 index 0000000000..4255102de3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go @@ -0,0 +1,299 @@ +package terraform + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl2/hcl" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/helper/didyoumean" + "github.com/hashicorp/terraform/tfdiags" +) + +// StaticValidateReferences checks the given references against schemas and +// other statically-checkable rules, producing error diagnostics if any +// problems are found. +// +// If this method returns errors for a particular reference then evaluating +// that reference is likely to generate a very similar error, so callers should +// not run this method and then also evaluate the source expression(s) and +// merge the two sets of diagnostics together, since this will result in +// confusing redundant errors. +// +// This method can find more errors than can be found by evaluating an +// expression with a partially-populated scope, since it checks the referenced +// names directly against the schema rather than relying on evaluation errors. +// +// The result may include warning diagnostics if, for example, deprecated +// features are referenced. +func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + for _, ref := range refs { + moreDiags := d.staticValidateReference(ref, self) + diags = diags.Append(moreDiags) + } + return diags +} + +func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { + modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if modCfg == nil { + // This is a bug in the caller rather than a problem with the + // reference, but rather than crashing out here in an unhelpful way + // we'll just ignore it and trust a different layer to catch it. + return nil + } + + if ref.Subject == addrs.Self { + // The "self" address is a special alias for the address given as + // our self parameter here, if present. + if self == nil { + var diags tfdiags.Diagnostics + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + return diags + } + + synthRef := *ref // shallow copy + synthRef.Subject = self + ref = &synthRef + } + + switch addr := ref.Subject.(type) { + + // For static validation we validate both resource and resource instance references the same way. + // We mostly disregard the index, though we do some simple validation of + // its _presence_ in staticValidateSingleResourceReference and + // staticValidateMultiResourceReference respectively. 
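+ // (Editorial example: aws_instance.a takes the addrs.Resource case below,
+ // aws_instance.a[0] takes the addrs.ResourceInstance case, and both also
+ // run the shared staticValidateResourceReference type-level checks.)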
+ case addrs.Resource: + var diags tfdiags.Diagnostics + diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + return diags + case addrs.ResourceInstance: + var diags tfdiags.Diagnostics + diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange)) + return diags + + // We also handle all module call references the same way, disregarding index. + case addrs.ModuleCall: + return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange) + case addrs.ModuleCallInstance: + return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange) + case addrs.ModuleCallOutput: + // This one is a funny one because we will take the output name referenced + // and use it to fake up a "remaining" that would make sense for the + // module call itself, rather than for the specific output, and then + // we can just re-use our static module call validation logic. + remain := make(hcl.Traversal, len(ref.Remaining)+1) + copy(remain[1:], ref.Remaining) + remain[0] = hcl.TraverseAttr{ + Name: addr.Name, + + // Using the whole reference as the source range here doesn't exactly + // match how HCL would normally generate an attribute traversal, + // but is close enough for our purposes. + SrcRange: ref.SourceRange.ToHCL(), + } + return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange) + + default: + // Anything else we'll just permit through without any static validation + // and let it be caught during dynamic evaluation, in evaluate.go. + return nil + } +} + +func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + // If we have at least one step in "remain" and this resource has + // "count" set then we know for sure this is invalid because we have + // something like: + // aws_instance.foo.bar + // ...when we really need + // aws_instance.foo[count.index].bar + + // It is _not_ safe to do this check when remain is empty, because that + // would also match aws_instance.foo[count.index].bar due to `count.index` + // not being statically-resolvable as part of a reference, and match + // direct references to the whole aws_instance.foo tuple. + if len(remain) == 0 { + return nil + } + + var diags tfdiags.Diagnostics + + cfg := modCfg.Module.ResourceByAddr(addr) + if cfg == nil { + // We'll just bail out here and catch this in our subsequent call to + // staticValidateResourceReference, then.
+ return diags + } + + if cfg.Count != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource instance key`, + Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr), + Subject: rng.ToHCL().Ptr(), + }) + } + if cfg.ForEach != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource instance key`, + Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr), + Subject: rng.ToHCL().Ptr(), + }) + } + + return diags +} + +func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource()) + if cfg == nil { + // We'll just bail out here and catch this in our subsequent call to + // staticValidateResourceReference, then. + return diags + } + + if addr.Key == addrs.NoKey { + // This is a different path into staticValidateSingleResourceReference + return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng) + } else { + if cfg.Count == nil && cfg.ForEach == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unexpected resource instance key`, + Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + } + } + + return diags +} + +func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + var modeAdjective string + switch addr.Mode { + case addrs.ManagedResourceMode: + modeAdjective = "managed" + case addrs.DataResourceMode: + modeAdjective = "data" + default: + // should never happen + modeAdjective = "" + } + + cfg := modCfg.Module.ResourceByAddr(addr) + if cfg == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared resource`, + Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path)), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + // Normally accessing this directly is wrong because it doesn't take into + // account provider inheritance, etc but it's okay here because we're only + // paying attention to the type anyway. + providerType := cfg.ProviderConfigAddr().Type + schema, _ := d.Evaluator.Schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) + + if schema == nil { + // Prior validation should've taken care of a resource block with an + // unsupported type, so we should never get here but we'll handle it + // here anyway for robustness. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid resource type`, + Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerType), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + // As a special case we'll detect attempts to access an attribute called + // "count" and produce a special error for it, since versions of Terraform + // prior to v0.12 offered this as a weird special case that we can no + // longer support. + if len(remain) > 0 { + if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid resource count attribute`, + Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + } + + // If we got this far then we'll try to validate the remaining traversal + // steps against our schema. + moreDiags := schema.StaticValidateTraversal(remain) + diags = diags.Append(moreDiags) + + return diags +} + +func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // For now, our focus here is just in testing that the referenced module + // call exists. All other validation is deferred until evaluation time. + _, exists := modCfg.Module.ModuleCalls[addr.Name] + if !exists { + var suggestions []string + for name := range modCfg.Module.ModuleCalls { + suggestions = append(suggestions, name) + } + sort.Strings(suggestions) + suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared module`, + Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + return diags +} + +// moduleConfigDisplayAddr returns a string describing the given module +// address that is appropriate for returning to users in situations where the +// root module is possible. Specifically, it returns "the root module" if the +// root module instance is given, or a string representation of the module +// address otherwise. +func moduleConfigDisplayAddr(addr addrs.Module) string { + switch { + case addr.IsRoot(): + return "the root module" + default: + return addr.String() + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go index 735ec4eceb..58d45a7b6a 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go @@ -3,17 +3,13 @@ package terraform import ( "fmt" "log" - "runtime/debug" - "strings" - "github.com/hashicorp/terraform/dag" -) + "github.com/hashicorp/terraform/tfdiags" -// RootModuleName is the name given to the root module implicitly. -const RootModuleName = "root" + "github.com/hashicorp/terraform/addrs" -// RootModulePath is the path for the root module. 
-var RootModulePath = []string{RootModuleName} + "github.com/hashicorp/terraform/dag" +) // Graph represents the graph that Terraform uses to represent resources // and their dependencies. @@ -23,9 +19,7 @@ type Graph struct { dag.AcyclicGraph // Path is the path in the module tree that this Graph represents. - // The root is represented by a single element list containing - // RootModuleName - Path []string + Path addrs.ModuleInstance // debugName is a name for reference in the debug output. This is usually // to indicate what topmost builder was, and if this graph is a shadow or @@ -40,71 +34,42 @@ func (g *Graph) DirectedGraph() dag.Grapher { // Walk walks the graph with the given walker for callbacks. The graph // will be walked with full parallelism, so the walker should expect // to be called in concurrently. -func (g *Graph) Walk(walker GraphWalker) error { +func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics { return g.walk(walker) } -func (g *Graph) walk(walker GraphWalker) error { +func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics { // The callbacks for enter/exiting a graph ctx := walker.EnterPath(g.Path) defer walker.ExitPath(g.Path) // Get the path for logs - path := strings.Join(ctx.Path(), ".") - - // Determine if our walker is a panic wrapper - panicwrap, ok := walker.(GraphWalkerPanicwrapper) - if !ok { - panicwrap = nil // just to be sure - } + path := ctx.Path().String() debugName := "walk-graph.json" if g.debugName != "" { debugName = g.debugName + "-" + debugName } - debugBuf := dbug.NewFileWriter(debugName) - g.SetDebugWriter(debugBuf) - defer debugBuf.Close() - // Walk the graph. var walkFn dag.WalkFunc - walkFn = func(v dag.Vertex) (rerr error) { - log.Printf("[TRACE] vertex '%s.%s': walking", path, dag.VertexName(v)) + walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) { + log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v) g.DebugVisitInfo(v, g.debugName) - // If we have a panic wrap GraphWalker and a panic occurs, recover - // and call that. We ensure the return value is an error, however, - // so that future nodes are not called. defer func() { - // If no panicwrap, do nothing - if panicwrap == nil { - return - } - - // If no panic, do nothing - err := recover() - if err == nil { - return - } - - // Modify the return value to show the error - rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s", - dag.VertexName(v), err, debug.Stack()) - - // Call the panic wrapper - panicwrap.Panic(v, err) + log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v)) }() walker.EnterVertex(v) - defer walker.ExitVertex(v, rerr) + defer walker.ExitVertex(v, diags) // vertexCtx is the context that we use when evaluating. This // is normally the context of our graph but can be overridden // with a GraphNodeSubPath impl. vertexCtx := ctx if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 { - vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path())) + vertexCtx = walker.EnterPath(pn.Path()) defer walker.ExitPath(pn.Path()) } @@ -112,60 +77,64 @@ func (g *Graph) walk(walker GraphWalker) error { if ev, ok := v.(GraphNodeEvalable); ok { tree := ev.EvalTree() if tree == nil { - panic(fmt.Sprintf( - "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v)) + panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v)) } // Allow the walker to change our tree if needed. Eval, // then callback with the output. 
- log.Printf("[TRACE] vertex '%s.%s': evaluating", path, dag.VertexName(v)) + log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v)) g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) tree = walker.EnterEvalTree(v, tree) output, err := Eval(tree, vertexCtx) - if rerr = walker.ExitEvalTree(v, output, err); rerr != nil { + diags = diags.Append(walker.ExitEvalTree(v, output, err)) + if diags.HasErrors() { return } } // If the node is dynamically expanded, then expand it if ev, ok := v.(GraphNodeDynamicExpandable); ok { - log.Printf( - "[TRACE] vertex '%s.%s': expanding/walking dynamic subgraph", - path, - dag.VertexName(v)) + log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v)) g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) g, err := ev.DynamicExpand(vertexCtx) if err != nil { - rerr = err + diags = diags.Append(err) return } if g != nil { // Walk the subgraph - if rerr = g.walk(walker); rerr != nil { + log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v)) + subDiags := g.walk(walker) + diags = diags.Append(subDiags) + if subDiags.HasErrors() { + log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v)) return } + log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v)) + } else { + log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v)) } } // If the node has a subgraph, then walk the subgraph if sn, ok := v.(GraphNodeSubgraph); ok { - log.Printf( - "[TRACE] vertex '%s.%s': walking subgraph", - path, - dag.VertexName(v)) + log.Printf("[TRACE] vertex %q: entering static subgraph", dag.VertexName(v)) g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) - if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil { + subDiags := sn.Subgraph().(*Graph).walk(walker) + if subDiags.HasErrors() { + log.Printf("[TRACE] vertex %q: static subgraph encountered errors", dag.VertexName(v)) return } + log.Printf("[TRACE] vertex %q: static subgraph completed successfully", dag.VertexName(v)) } - return nil + return } return g.AcyclicGraph.Walk(walkFn) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go index 6374bb9045..66b21f300d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go @@ -4,6 +4,10 @@ import ( "fmt" "log" "strings" + + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/terraform/addrs" ) // GraphBuilder is an interface that can be implemented and used with @@ -12,7 +16,7 @@ type GraphBuilder interface { // Build builds the graph for the given module path. It is up to // the interface implementation whether this build should expand // the graph or not. 
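+//
+// An editorial sketch (not part of this change) of consuming the new
+// signature, where callers receive diagnostics instead of a bare error:
+//
+//	g, diags := builder.Build(addrs.RootModuleInstance)
+//	if diags.HasErrors() {
+//		return diags.Err() // abort before walking the graph
+//	}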
- Build(path []string) (*Graph, error) + Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) } // BasicGraphBuilder is a GraphBuilder that builds a graph out of a @@ -25,21 +29,16 @@ type BasicGraphBuilder struct { Name string } -func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { +func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics g := &Graph{Path: path} - debugName := "graph.json" - if b.Name != "" { - debugName = b.Name + "-" + debugName - } - debugBuf := dbug.NewFileWriter(debugName) - g.SetDebugWriter(debugBuf) - defer debugBuf.Close() - + var lastStepStr string for _, step := range b.Steps { if step == nil { continue } + log.Printf("[TRACE] Executing graph transform %T", step) stepName := fmt.Sprintf("%T", step) dot := strings.LastIndex(stepName, ".") @@ -56,12 +55,20 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { } debugOp.End(errMsg) - log.Printf( - "[TRACE] Graph after step %T:\n\n%s", - step, g.StringWithNodeTypes()) + if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { + log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s------", step, thisStepStr) + lastStepStr = thisStepStr + } else { + log.Printf("[TRACE] Completed graph transform %T (no changes)", step) + } if err != nil { - return g, err + if nf, isNF := err.(tfdiags.NonFatalError); isNF { + diags = diags.Append(nf.Diagnostics) + } else { + diags = diags.Append(err) + return g, diags + } } } @@ -69,9 +76,10 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { if b.Validate { if err := g.Validate(); err != nil { log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String()) - return nil, err + diags = diags.Append(err) + return nil, diags } } - return g, nil + return g, diags } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go index 0c2b2332f4..7182dd7db1 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go @@ -1,8 +1,12 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // ApplyGraphBuilder implements GraphBuilder and is responsible for building @@ -13,26 +17,28 @@ import ( // that aren't explicitly in the diff. There are other scenarios where the // diff can be deviated, so this is just one layer of protection. type ApplyGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree + // Config is the configuration tree that the diff was built from. + Config *configs.Config - // Diff is the diff to apply. - Diff *Diff + // Changes describes the changes that we need apply. + Changes *plans.Changes // State is the current state - State *State + State *states.State - // Providers is the list of providers supported. - Providers []string + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory - // Provisioners is the list of provisioners supported. 
- Provisioners []string + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas // Targets are resources to target. This is only required to make sure // unnecessary outputs aren't included in the apply graph. The plan // builder successfully handles targeting resources. In the future, // outputs should go into the diff so that this is unnecessary. - Targets []string + Targets []addrs.Targetable // DisableReduce, if true, will not reduce the graph. Great for testing. DisableReduce bool @@ -45,7 +51,7 @@ type ApplyGraphBuilder struct { } // See GraphBuilder -func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) { +func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { return (&BasicGraphBuilder{ Steps: b.Steps(), Validate: b.Validate, @@ -68,53 +74,99 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { } } + concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeDestroyResource{ + NodeAbstractResource: a, + } + } + + concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodeApplyableResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + steps := []GraphTransformer{ - // Creates all the nodes represented in the diff. - &DiffTransformer{ + // Creates all the resources represented in the config. During apply, + // we use this just to ensure that the whole-resource metadata is + // updated to reflect things such as whether the count argument is + // set in config, or which provider configuration manages each resource. + &ConfigTransformer{ Concrete: concreteResource, + Config: b.Config, + - Diff: b.Diff, - Module: b.Module, - State: b.State, + // Creates all the resource instances represented in the diff, along + // with dependency edges against the whole-resource nodes added by + // ConfigTransformer above. + &DiffTransformer{ + Concrete: concreteResourceInstance, + State: b.State, + Changes: b.Changes, + }, + + // Creates extra cleanup nodes for any entire resources that are + // no longer present in config, so we can make sure we clean up the + // leftover empty resource states after the instances have been + // destroyed. + // (We don't track this particular type of change in the plan because + // it's just cleanup of our own state object, and so doesn't affect + // any real remote objects or consumable outputs.)
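+ // (Editorial example: if state still tracks aws_instance.old but no
+ // "aws_instance" "old" block remains in configuration, this transformer
+ // adds a node that removes the leftover empty resource entry from state
+ // once its instances are destroyed.)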
+ &OrphanResourceTransformer{ + Concrete: concreteOrphanResource, + Config: b.Config, + State: b.State, }, // Create orphan output nodes - &OrphanOutputTransformer{Module: b.Module, State: b.State}, + &OrphanOutputTransformer{Config: b.Config, State: b.State}, // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, + &AttachResourceConfigTransformer{Config: b.Config}, // Attach the state &AttachStateTransformer{State: b.State}, - // add providers - TransformProviders(b.Providers, concreteProvider, b.Module), - // Destruction ordering - &DestroyEdgeTransformer{Module: b.Module, State: b.State}, + &DestroyEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, GraphTransformIf( func() bool { return !b.Destroy }, - &CBDEdgeTransformer{Module: b.Module, State: b.State}, + &CBDEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, ), // Provisioner-related transformations - &MissingProvisionerTransformer{Provisioners: b.Provisioners}, + &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, &ProvisionerTransformer{}, // Add root variables - &RootVariableTransformer{Module: b.Module}, + &RootVariableTransformer{Config: b.Config}, // Add the local values - &LocalTransformer{Module: b.Module}, + &LocalTransformer{Config: b.Config}, // Add the outputs - &OutputTransformer{Module: b.Module}, + &OutputTransformer{Config: b.Config}, // Add module variables - &ModuleVariableTransformer{Module: b.Module}, + &ModuleVariableTransformer{Config: b.Config}, + + // add providers + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), // Remove modules no longer present in the config - &RemovedModuleTransformer{Module: b.Module, State: b.State}, + &RemovedModuleTransformer{Config: b.Config, State: b.State}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, // Connect references so ordering is correct &ReferenceTransformer{}, @@ -135,7 +187,9 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { ), // Add the node to fix the state count boundaries - &CountBoundaryTransformer{}, + &CountBoundaryTransformer{ + Config: b.Config, + }, // Target &TargetsTransformer{Targets: b.Targets}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go index 014b348e5f..a6047a9b41 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go @@ -1,8 +1,11 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // DestroyPlanGraphBuilder implements GraphBuilder and is responsible for @@ -11,21 +14,29 @@ import ( // Planning a pure destroy operation is simple because we can ignore most // ordering configuration and simply reverse the state. type DestroyPlanGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree + // Config is the configuration tree to build the plan from. 
+ Config *configs.Config // State is the current state - State *State + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas // Targets are resources to target - Targets []string + Targets []addrs.Targetable // Validate will do structural validation of the graph. Validate bool } // See GraphBuilder -func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) { +func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { return (&BasicGraphBuilder{ Steps: b.Steps(), Validate: b.Validate, @@ -35,25 +46,44 @@ func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) { // See GraphBuilder func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodePlanDestroyableResource{ - NodeAbstractResource: a, + concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodePlanDestroyableResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + } + } + + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, } } steps := []GraphTransformer{ - // Creates all the nodes represented in the state. + // Creates nodes for the resource instances tracked in the state. &StateTransformer{ - Concrete: concreteResource, - State: b.State, + ConcreteCurrent: concreteResourceInstance, + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, }, // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, + &AttachResourceConfigTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), // Destruction ordering. We require this only so that // targeting below will prune the correct things. - &DestroyEdgeTransformer{Module: b.Module, State: b.State}, + &DestroyEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, // Target. Note we don't set "Destroy: true" here since we already // created proper destroy ordering. diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go new file mode 100644 index 0000000000..eb6c897bf1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go @@ -0,0 +1,108 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable +// for evaluating in-memory values (input variables, local values, output +// values) in the state without any other side-effects. +// +// This graph is used only in weird cases, such as the "terraform console" +// CLI command, where we need to evaluate expressions against the state +// without taking any other actions. 
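+//
+// For example, "terraform console" can evaluate an expression such as
+// local.example or var.region (illustrative names) against the latest
+// state snapshot without planning or applying any changes.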
+// +// The generated graph will include nodes for providers, resources, etc +// just to allow indirect dependencies to be resolved, but these nodes will +// not take any actions themselves since we assume that their parts of the +// state, if any, are already complete. +// +// Although the providers are never configured, they must still be available +// in order to obtain schema information used for type checking, etc. +type EvalGraphBuilder struct { + // Config is the configuration tree. + Config *configs.Config + + // State is the current state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas +} + +// See GraphBuilder +func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: true, + Name: "EvalGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *EvalGraphBuilder) Steps() []GraphTransformer { + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeEvalableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Creates all the data resources that aren't in the state. This will also + // add any orphans from scaling in as destroy nodes. + &ConfigTransformer{ + Concrete: nil, // just use the abstract type + Config: b.Config, + Unique: true, + }, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add module variables + &ModuleVariableTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Although we don't configure providers, we do still start them up + // to get their schemas, and so we must shut them down again here. + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + + // Remove redundant edges to simplify the graph. + &TransitiveReductionTransformer{}, + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go index 07a1eaf830..7b0e39f45c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go @@ -1,8 +1,10 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) // ImportGraphBuilder implements GraphBuilder and is responsible for building @@ -12,15 +14,19 @@ type ImportGraphBuilder struct { // ImportTargets are the list of resources to import. 
 	ImportTargets []*ImportTarget

-	// Module is the module to add to the graph. See ImportOpts.Module.
-	Module *module.Tree
+	// Config is the configuration to build the graph from. See ImportOpts.Config.
+	Config *configs.Config

-	// Providers is the list of providers supported.
-	Providers []string
+	// Components is the factory for our available plugin components.
+	Components contextComponentFactory
+
+	// Schemas is the repository of schemas we will draw from to analyse
+	// the configuration.
+	Schemas *Schemas
 }

 // Build builds the graph according to the steps returned by Steps.
-func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
+func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
 	return (&BasicGraphBuilder{
 		Steps:    b.Steps(),
 		Validate: true,
@@ -33,9 +39,9 @@ func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
 func (b *ImportGraphBuilder) Steps() []GraphTransformer {
 	// Get the module. If we don't have one, we just use an empty tree
 	// so that the transform still works but does nothing.
-	mod := b.Module
-	if mod == nil {
-		mod = module.NewEmptyTree()
+	config := b.Config
+	if config == nil {
+		config = configs.NewEmptyConfig()
 	}

 	// Custom factory for creating providers.
@@ -47,16 +53,36 @@ func (b *ImportGraphBuilder) Steps() []GraphTransformer {
 	steps := []GraphTransformer{
 		// Create all our resources from the configuration and state
-		&ConfigTransformer{Module: mod},
+		&ConfigTransformer{Config: config},

 		// Add the import steps
 		&ImportStateTransformer{Targets: b.ImportTargets},

-		TransformProviders(b.Providers, concreteProvider, mod),
+		// Add root variables
+		&RootVariableTransformer{Config: b.Config},
+
+		TransformProviders(b.Components.ResourceProviders(), concreteProvider, config),

 		// This validates that the providers only depend on variables
 		&ImportProviderValidateTransformer{},

+		// Add the local values
+		&LocalTransformer{Config: b.Config},
+
+		// Add the outputs
+		&OutputTransformer{Config: b.Config},
+
+		// Add module variables
+		&ModuleVariableTransformer{Config: b.Config},
+
+		// Must attach schemas before ReferenceTransformer so that we can
+		// analyze the configuration to find references.
+		&AttachSchemaTransformer{Schemas: b.Schemas},
+
+		// Connect so that the references are ready for targeting. We'll
+		// have to connect again later for providers and so on.
+		&ReferenceTransformer{},
+
 		// Close opened plugin connections
 		&CloseProviderTransformer{},
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
deleted file mode 100644
index 0df48cdb87..0000000000
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package terraform
-
-import (
-	"github.com/hashicorp/terraform/dag"
-)
-
-// InputGraphBuilder creates the graph for the input operation.
-//
-// Unlike other graph builders, this is a function since it currently modifies
-// and is based on the PlanGraphBuilder. The PlanGraphBuilder passed in will be
-// modified and should not be used for any other operations.
-func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
-	// We're going to customize the concrete functions
-	p.CustomConcrete = true
-
-	// Set the provider to the normal provider. This will ask for input.
- p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - // We purposely don't set any more concrete fields since the remainder - // should be no-ops. - - return p -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go index f8dd0fc93a..17adfd279e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go @@ -3,8 +3,11 @@ package terraform import ( "sync" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // PlanGraphBuilder implements GraphBuilder and is responsible for building @@ -19,20 +22,22 @@ import ( // create-before-destroy can be completely ignored. // type PlanGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree + // Config is the configuration tree to build a plan from. + Config *configs.Config // State is the current state - State *State + State *states.State - // Providers is the list of providers supported. - Providers []string + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory - // Provisioners is the list of provisioners supported. - Provisioners []string + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas // Targets are resources to target - Targets []string + Targets []addrs.Targetable // DisableReduce, if true, will not reduce the graph. Great for testing. DisableReduce bool @@ -46,13 +51,13 @@ type PlanGraphBuilder struct { CustomConcrete bool ConcreteProvider ConcreteProviderNodeFunc ConcreteResource ConcreteResourceNodeFunc - ConcreteResourceOrphan ConcreteResourceNodeFunc + ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc once sync.Once } // See GraphBuilder -func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) { +func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { return (&BasicGraphBuilder{ Steps: b.Steps(), Validate: b.Validate, @@ -64,66 +69,82 @@ func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) { func (b *PlanGraphBuilder) Steps() []GraphTransformer { b.once.Do(b.init) + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + } + } + steps := []GraphTransformer{ // Creates all the resources represented in the config &ConfigTransformer{ Concrete: b.ConcreteResource, - Module: b.Module, + Config: b.Config, }, // Add the local values - &LocalTransformer{Module: b.Module}, + &LocalTransformer{Config: b.Config}, // Add the outputs - &OutputTransformer{Module: b.Module}, + &OutputTransformer{Config: b.Config}, // Add orphan resources - &OrphanResourceTransformer{ + &OrphanResourceInstanceTransformer{ Concrete: b.ConcreteResourceOrphan, State: b.State, - Module: b.Module, + Config: b.Config, + }, + + // We also need nodes for any deposed instance objects present in the + // state, so we can plan to destroy them. 
(This intentionally + // skips creating nodes for _current_ objects, since ConfigTransformer + // created nodes that will do that during DynamicExpand.) + &StateTransformer{ + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, }, // Create orphan output nodes &OrphanOutputTransformer{ - Module: b.Module, + Config: b.Config, State: b.State, }, // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, + &AttachResourceConfigTransformer{Config: b.Config}, // Attach the state &AttachStateTransformer{State: b.State}, // Add root variables - &RootVariableTransformer{Module: b.Module}, - - TransformProviders(b.Providers, b.ConcreteProvider, b.Module), + &RootVariableTransformer{Config: b.Config}, - // Provisioner-related transformations. Only add these if requested. - GraphTransformIf( - func() bool { return b.Provisioners != nil }, - GraphTransformMulti( - &MissingProvisionerTransformer{Provisioners: b.Provisioners}, - &ProvisionerTransformer{}, - ), - ), + &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, + &ProvisionerTransformer{}, // Add module variables &ModuleVariableTransformer{ - Module: b.Module, + Config: b.Config, }, + TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config), + // Remove modules no longer present in the config - &RemovedModuleTransformer{Module: b.Module, State: b.State}, + &RemovedModuleTransformer{Config: b.Config, State: b.State}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, // Connect so that the references are ready for targeting. We'll // have to connect again later for providers and so on. &ReferenceTransformer{}, // Add the node to fix the state count boundaries - &CountBoundaryTransformer{}, + &CountBoundaryTransformer{ + Config: b.Config, + }, // Target &TargetsTransformer{ @@ -136,6 +157,10 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { IgnoreIndices: true, }, + // Detect when create_before_destroy must be forced on for a particular + // node due to dependency edges, to avoid graph cycles during apply. 
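+		// (Create-before-destroy is contagious here: e.g. if a resource with
+		// create_before_destroy set depends on one without it, the dependency
+		// must also be handled create-before-destroy, since its reversed
+		// destroy edge would otherwise produce a cycle.)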
+ &ForcedCBDTransformer{}, + // Close opened plugin connections &CloseProviderTransformer{}, &CloseProvisionerTransformer{}, @@ -167,15 +192,13 @@ func (b *PlanGraphBuilder) init() { b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { return &NodePlannableResource{ - NodeAbstractCountResource: &NodeAbstractCountResource{ - NodeAbstractResource: a, - }, + NodeAbstractResource: a, } } - b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex { - return &NodePlannableResourceOrphan{ - NodeAbstractResource: a, + b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: a, } } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go index 9638d4c8f3..0342cdbe83 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go @@ -3,8 +3,11 @@ package terraform import ( "log" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" ) @@ -21,17 +24,22 @@ import ( // create-before-destroy can be completely ignored. // type RefreshGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree + // Config is the configuration tree. + Config *configs.Config + + // State is the prior state + State *states.State - // State is the current state - State *State + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory - // Providers is the list of providers supported. - Providers []string + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas // Targets are resources to target - Targets []string + Targets []addrs.Targetable // DisableReduce, if true, will not reduce the graph. Great for testing. DisableReduce bool @@ -41,7 +49,7 @@ type RefreshGraphBuilder struct { } // See GraphBuilder -func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) { +func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { return (&BasicGraphBuilder{ Steps: b.Steps(), Validate: b.Validate, @@ -60,23 +68,27 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { return &NodeRefreshableManagedResource{ - NodeAbstractCountResource: &NodeAbstractCountResource{ - NodeAbstractResource: a, - }, + NodeAbstractResource: a, } } - concreteManagedResourceInstance := func(a *NodeAbstractResource) dag.Vertex { + concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResource: a, + NodeAbstractResourceInstance: a, + } + } + + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + // The "Plan" node type also handles refreshing behavior. 
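+		// (A deposed object is a former "current" object that was replaced
+		// via create_before_destroy but whose destroy step failed, so it is
+		// still tracked in state under a DeposedKey until it is cleaned up.)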
+ return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, } } concreteDataResource := func(a *NodeAbstractResource) dag.Vertex { return &NodeRefreshableDataResource{ - NodeAbstractCountResource: &NodeAbstractCountResource{ - NodeAbstractResource: a, - }, + NodeAbstractResource: a, } } @@ -88,13 +100,13 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { if b.State.HasResources() { return &ConfigTransformer{ Concrete: concreteManagedResource, - Module: b.Module, + Config: b.Config, Unique: true, ModeFilter: true, - Mode: config.ManagedResourceMode, + Mode: addrs.ManagedResourceMode, } } - log.Println("[TRACE] No managed resources in state during refresh, skipping managed resource transformer") + log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer") return nil }(), @@ -102,40 +114,53 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { // add any orphans from scaling in as destroy nodes. &ConfigTransformer{ Concrete: concreteDataResource, - Module: b.Module, + Config: b.Config, Unique: true, ModeFilter: true, - Mode: config.DataResourceMode, + Mode: addrs.DataResourceMode, }, // Add any fully-orphaned resources from config (ones that have been // removed completely, not ones that are just orphaned due to a scaled-in // count. - &OrphanResourceTransformer{ + &OrphanResourceInstanceTransformer{ Concrete: concreteManagedResourceInstance, State: b.State, - Module: b.Module, + Config: b.Config, + }, + + // We also need nodes for any deposed instance objects present in the + // state, so we can check if they still exist. (This intentionally + // skips creating nodes for _current_ objects, since ConfigTransformer + // created nodes that will do that during DynamicExpand.) + &StateTransformer{ + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, }, // Attach the state &AttachStateTransformer{State: b.State}, // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, + &AttachResourceConfigTransformer{Config: b.Config}, // Add root variables - &RootVariableTransformer{Module: b.Module}, - - TransformProviders(b.Providers, concreteProvider, b.Module), + &RootVariableTransformer{Config: b.Config}, // Add the local values - &LocalTransformer{Module: b.Module}, + &LocalTransformer{Config: b.Config}, // Add the outputs - &OutputTransformer{Module: b.Module}, + &OutputTransformer{Config: b.Config}, // Add module variables - &ModuleVariableTransformer{Module: b.Module}, + &ModuleVariableTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, // Connect so that the references are ready for targeting. We'll // have to connect again later for providers and so on. 
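All of the graph builders above now share one calling convention: populate the
Config/State/Components/Schemas fields, call Build with a module instance path,
and inspect the returned diagnostics rather than a plain error. A minimal
driver sketch of that pattern, assuming addrs.RootModuleInstance and
placeholder values for the builder fields (this code is not part of the patch
itself):

	b := &RefreshGraphBuilder{
		Config:     config,     // *configs.Config: the loaded configuration tree
		State:      state,      // *states.State: the prior state being refreshed
		Components: components, // contextComponentFactory providing plugins
		Schemas:    schemas,    // *Schemas used to resolve references
		Validate:   true,
	}
	graph, diags := b.Build(addrs.RootModuleInstance)
	if diags.HasErrors() {
		return nil, diags.Err()
	}
	// graph is then walked by a GraphWalker implementation such as the
	// *ContextGraphWalker shown further below.
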
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go index 645ec7be96..1881f95f2b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go @@ -23,9 +23,7 @@ func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder { p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { return &NodeValidatableResource{ - NodeAbstractCountResource: &NodeAbstractCountResource{ - NodeAbstractResource: a, - }, + NodeAbstractResource: a, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go index 2897eb546a..768590fb05 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go @@ -1,7 +1,11 @@ package terraform +import ( + "github.com/hashicorp/terraform/addrs" +) + // GraphNodeSubPath says that a node is part of a graph with a // different path, and the context should be adjusted accordingly. type GraphNodeSubPath interface { - Path() []string + Path() addrs.ModuleInstance } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go index 34ce6f6404..e980e0c6d3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go @@ -1,60 +1,32 @@ package terraform import ( + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) // GraphWalker is an interface that can be implemented that when used // with Graph.Walk will invoke the given callbacks under certain events. type GraphWalker interface { - EnterPath([]string) EvalContext - ExitPath([]string) + EnterPath(addrs.ModuleInstance) EvalContext + ExitPath(addrs.ModuleInstance) EnterVertex(dag.Vertex) - ExitVertex(dag.Vertex, error) + ExitVertex(dag.Vertex, tfdiags.Diagnostics) EnterEvalTree(dag.Vertex, EvalNode) EvalNode - ExitEvalTree(dag.Vertex, interface{}, error) error + ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics } -// GrpahWalkerPanicwrapper can be optionally implemented to catch panics -// that occur while walking the graph. This is not generally recommended -// since panics should crash Terraform and result in a bug report. However, -// this is particularly useful for situations like the shadow graph where -// you don't ever want to cause a panic. -type GraphWalkerPanicwrapper interface { - GraphWalker - - // Panic is called when a panic occurs. This will halt the panic from - // propogating so if the walker wants it to crash still it should panic - // again. This is called from within a defer so runtime/debug.Stack can - // be used to get the stack trace of the panic. - Panic(dag.Vertex, interface{}) -} - -// GraphWalkerPanicwrap wraps an existing Graphwalker to wrap and swallow -// the panics. This doesn't lose the panics since the panics are still -// returned as errors as part of a graph walk. 
-func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper { - return &graphWalkerPanicwrapper{ - GraphWalker: w, - } -} - -type graphWalkerPanicwrapper struct { - GraphWalker -} - -func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {} - // NullGraphWalker is a GraphWalker implementation that does nothing. // This can be embedded within other GraphWalker implementations for easily // implementing all the required functions. type NullGraphWalker struct{} -func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) } -func (NullGraphWalker) ExitPath([]string) {} +func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) } +func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {} func (NullGraphWalker) EnterVertex(dag.Vertex) {} -func (NullGraphWalker) ExitVertex(dag.Vertex, error) {} +func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {} func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } -func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error { +func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics { return nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go index 89f376e54f..03c192a866 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go @@ -2,12 +2,19 @@ package terraform import ( "context" - "fmt" "log" "sync" - "github.com/hashicorp/errwrap" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // ContextGraphWalker is the GraphWalker implementation used with the @@ -16,54 +23,56 @@ type ContextGraphWalker struct { NullGraphWalker // Configurable values - Context *Context - Operation walkOperation - StopContext context.Context - - // Outputs, do not set these. Do not read these while the graph - // is being walked. - ValidationWarnings []string - ValidationErrors []error - - errorLock sync.Mutex - once sync.Once - contexts map[string]*BuiltinEvalContext - contextLock sync.Mutex - interpolaterVars map[string]map[string]interface{} - interpolaterVarLock sync.Mutex - providerCache map[string]ResourceProvider - providerLock sync.Mutex - provisionerCache map[string]ResourceProvisioner - provisionerLock sync.Mutex + Context *Context + State *states.SyncState // Used for safe concurrent access to state + Changes *plans.ChangesSync // Used for safe concurrent writes to changes + Operation walkOperation + StopContext context.Context + RootVariableValues InputValues + + // This is an output. Do not set this, nor read it while a graph walk + // is in progress. 
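+	// NonFatalDiagnostics accumulates diagnostics from vertices whose eval
+	// returned tfdiags.NonFatalError; ExitEvalTree (below) swallows such
+	// errors so that the graph walk can continue past them.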
+ NonFatalDiagnostics tfdiags.Diagnostics + + errorLock sync.Mutex + once sync.Once + contexts map[string]*BuiltinEvalContext + contextLock sync.Mutex + variableValues map[string]map[string]cty.Value + variableValuesLock sync.Mutex + providerCache map[string]providers.Interface + providerSchemas map[string]*ProviderSchema + providerLock sync.Mutex + provisionerCache map[string]provisioners.Interface + provisionerSchemas map[string]*configschema.Block + provisionerLock sync.Mutex } -func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { +func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext { w.once.Do(w.init) w.contextLock.Lock() defer w.contextLock.Unlock() // If we already have a context for this path cached, use that - key := PathCacheKey(path) + key := path.String() if ctx, ok := w.contexts[key]; ok { return ctx } - // Setup the variables for this interpolater - variables := make(map[string]interface{}) - if len(path) <= 1 { - for k, v := range w.Context.variables { - variables[k] = v - } - } - w.interpolaterVarLock.Lock() - if m, ok := w.interpolaterVars[key]; ok { - for k, v := range m { - variables[k] = v - } + // Our evaluator shares some locks with the main context and the walker + // so that we can safely run multiple evaluations at once across + // different modules. + evaluator := &Evaluator{ + Meta: w.Context.meta, + Config: w.Context.config, + Operation: w.Operation, + State: w.State, + Changes: w.Changes, + Schemas: w.Context.schemas, + VariableValues: w.variableValues, + VariableValuesLock: &w.variableValuesLock, } - w.interpolaterVars[key] = variables - w.interpolaterVarLock.Unlock() ctx := &BuiltinEvalContext{ StopContext: w.StopContext, @@ -71,26 +80,17 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { Hooks: w.Context.hooks, InputValue: w.Context.uiInput, Components: w.Context.components, + Schemas: w.Context.schemas, ProviderCache: w.providerCache, ProviderInputConfig: w.Context.providerInputConfig, ProviderLock: &w.providerLock, ProvisionerCache: w.provisionerCache, ProvisionerLock: &w.provisionerLock, - DiffValue: w.Context.diff, - DiffLock: &w.Context.diffLock, - StateValue: w.Context.state, - StateLock: &w.Context.stateLock, - Interpolater: &Interpolater{ - Operation: w.Operation, - Meta: w.Context.meta, - Module: w.Context.module, - State: w.Context.state, - StateLock: &w.Context.stateLock, - VariableValues: variables, - VariableValuesLock: &w.interpolaterVarLock, - }, - InterpolaterVars: w.interpolaterVars, - InterpolaterVarLock: &w.interpolaterVarLock, + ChangesValue: w.Changes, + StateValue: w.State, + Evaluator: evaluator, + VariableValues: w.variableValues, + VariableValuesLock: &w.variableValuesLock, } w.contexts[key] = ctx @@ -98,8 +98,7 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { } func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { - log.Printf("[TRACE] [%s] Entering eval tree: %s", - w.Operation, dag.VertexName(v)) + log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v)) // Acquire a lock on the semaphore w.Context.parallelSem.Acquire() @@ -109,10 +108,8 @@ func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return EvalFilter(n, EvalNodeFilterOp(w.Operation)) } -func (w *ContextGraphWalker) ExitEvalTree( - v dag.Vertex, output interface{}, err error) error { - log.Printf("[TRACE] [%s] Exiting eval tree: %s", - w.Operation, dag.VertexName(v)) +func (w *ContextGraphWalker) ExitEvalTree(v 
dag.Vertex, output interface{}, err error) tfdiags.Diagnostics { + log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v)) // Release the semaphore w.Context.parallelSem.Release() @@ -125,30 +122,36 @@ func (w *ContextGraphWalker) ExitEvalTree( w.errorLock.Lock() defer w.errorLock.Unlock() - // Try to get a validation error out of it. If its not a validation - // error, then just record the normal error. - verr, ok := err.(*EvalValidateError) - if !ok { - return err - } - - for _, msg := range verr.Warnings { - w.ValidationWarnings = append( - w.ValidationWarnings, - fmt.Sprintf("%s: %s", dag.VertexName(v), msg)) - } - for _, e := range verr.Errors { - w.ValidationErrors = append( - w.ValidationErrors, - errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e)) + // If the error is non-fatal then we'll accumulate its diagnostics in our + // non-fatal list, rather than returning it directly, so that the graph + // walk can continue. + if nferr, ok := err.(tfdiags.NonFatalError); ok { + log.Printf("[WARN] %s: %s", dag.VertexName(v), nferr) + w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics) + return nil } - return nil + // Otherwise, we'll let our usual diagnostics machinery figure out how to + // unpack this as one or more diagnostic messages and return that. If we + // get down here then the returned diagnostics will contain at least one + // error, causing the graph walk to halt. + var diags tfdiags.Diagnostics + diags = diags.Append(err) + return diags } func (w *ContextGraphWalker) init() { - w.contexts = make(map[string]*BuiltinEvalContext, 5) - w.providerCache = make(map[string]ResourceProvider, 5) - w.provisionerCache = make(map[string]ResourceProvisioner, 5) - w.interpolaterVars = make(map[string]map[string]interface{}, 5) + w.contexts = make(map[string]*BuiltinEvalContext) + w.providerCache = make(map[string]providers.Interface) + w.providerSchemas = make(map[string]*ProviderSchema) + w.provisionerCache = make(map[string]provisioners.Interface) + w.provisionerSchemas = make(map[string]*configschema.Block) + w.variableValues = make(map[string]map[string]cty.Value) + + // Populate root module variable values. Other modules will be populated + // during the graph walk. 
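+	// The outer map is keyed by module instance path string; the root
+	// module's path renders as "", so e.g. a root input variable "region"
+	// would be stored at w.variableValues[""]["region"].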
+ w.variableValues[""] = make(map[string]cty.Value) + for k, iv := range w.RootVariableValues { + w.variableValues[""][k] = iv.Value + } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go index 3fb3748191..a3756e764d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go @@ -7,7 +7,6 @@ type walkOperation byte const ( walkInvalid walkOperation = iota - walkInput walkApply walkPlan walkPlanDestroy @@ -15,4 +14,5 @@ const ( walkValidate walkDestroy walkImport + walkEval // used just to prepare EvalContext for expression evaluation, with no other actions ) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go index 95ef4e94d4..855ef7b127 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go @@ -4,9 +4,9 @@ package terraform import "strconv" -const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" +const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval" -var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125} +var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124} func (i GraphType) String() string { if i >= GraphType(len(_GraphType_index)-1) { diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go index ab11e8ee01..c0bb23ab2d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/hook.go +++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go @@ -1,5 +1,14 @@ package terraform +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) + // HookAction is an enum of actions that can be taken as a result of a hook // callback. This allows you to modify the behavior of Terraform at runtime. type HookAction byte @@ -21,42 +30,56 @@ const ( // NilHook into your struct, which implements all of the interface but does // nothing. Then, override only the functions you want to implement. type Hook interface { - // PreApply and PostApply are called before and after a single - // resource is applied. The error argument in PostApply is the + // PreApply and PostApply are called before and after an action for a + // single instance is applied. The error argument in PostApply is the // error, if any, that was returned from the provider Apply call itself. - PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) - PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) - - // PreDiff and PostDiff are called before and after a single resource - // resource is diffed. 
-	PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
-	PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
-
-	// Provisioning hooks
+	PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error)
+	PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error)
+
+	// PreDiff and PostDiff are called before and after a provider is given
+	// the opportunity to customize the proposed new state to produce the
+	// planned new state.
+	PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error)
+	PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error)
+
+	// The provisioning hooks signal both the overall start and end of
+	// provisioning for a particular instance and of each of the individual
+	// configured provisioners for each instance. The sequence of these
+	// for a given instance might look something like this:
 	//
-	// All should be self-explanatory. ProvisionOutput is called with
-	// output sent back by the provisioners. This will be called multiple
-	// times as output comes in, but each call should represent a line of
-	// output. The ProvisionOutput method cannot control whether the
-	// hook continues running.
-	PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
-	PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
-	PreProvision(*InstanceInfo, string) (HookAction, error)
-	PostProvision(*InstanceInfo, string, error) (HookAction, error)
-	ProvisionOutput(*InstanceInfo, string, string)
+	// PreProvisionInstance(aws_instance.foo[1], ...)
+	// PreProvisionInstanceStep(aws_instance.foo[1], "file")
+	// PostProvisionInstanceStep(aws_instance.foo[1], "file", nil)
+	// PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec")
+	// ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...")
+	// ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...")
+	// PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil)
+	// PostProvisionInstance(aws_instance.foo[1], ...)
+	//
+	// ProvisionOutput is called with output sent back by the provisioners.
+	// This will be called multiple times as output comes in, with each call
+	// representing one line of output. It cannot control whether the
+	// provisioner continues running.
+	PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error)
+	PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error)
+	PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error)
+	PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error)
+	ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string)

 	// PreRefresh and PostRefresh are called before and after a single
 	// resource state is refreshed, respectively.
-	PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
-	PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
-
-	// PostStateUpdate is called after the state is updated.
- PostStateUpdate(*State) (HookAction, error) + PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) + PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) // PreImportState and PostImportState are called before and after - // a single resource's state is being improted. - PreImportState(*InstanceInfo, string) (HookAction, error) - PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) + // (respectively) each state import operation for a given resource address. + PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) + PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) + + // PostStateUpdate is called each time the state is updated. It receives + // a deep copy of the state, which it may therefore access freely without + // any need for locks to protect from concurrent writes from the caller. + PostStateUpdate(new *states.State) (HookAction, error) } // NilHook is a Hook implementation that does nothing. It exists only to @@ -64,59 +87,60 @@ type Hook interface { // and only implement the functions you are interested in. type NilHook struct{} -func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { +var _ Hook = (*NilHook)(nil) + +func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { +func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { +func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { +func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { +func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { +func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) { +func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { +func (*NilHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) ProvisionOutput( - *InstanceInfo, string, string) { +func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { } -func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) 
(HookAction, error) { +func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { +func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) { +func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { +func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostStateUpdate(*State) (HookAction, error) { +func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { return HookActionContinue, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go index 0e46400678..6efa319632 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go @@ -1,245 +1,274 @@ package terraform -import "sync" +import ( + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) // MockHook is an implementation of Hook that can be used for tests. // It records all of its function calls. type MockHook struct { sync.Mutex - PreApplyCalled bool - PreApplyInfo *InstanceInfo - PreApplyDiff *InstanceDiff - PreApplyState *InstanceState - PreApplyReturn HookAction - PreApplyError error + PreApplyCalled bool + PreApplyAddr addrs.AbsResourceInstance + PreApplyGen states.Generation + PreApplyAction plans.Action + PreApplyPriorState cty.Value + PreApplyPlannedState cty.Value + PreApplyReturn HookAction + PreApplyError error PostApplyCalled bool - PostApplyInfo *InstanceInfo - PostApplyState *InstanceState + PostApplyAddr addrs.AbsResourceInstance + PostApplyGen states.Generation + PostApplyNewState cty.Value PostApplyError error PostApplyReturn HookAction PostApplyReturnError error - PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error) - - PreDiffCalled bool - PreDiffInfo *InstanceInfo - PreDiffState *InstanceState - PreDiffReturn HookAction - PreDiffError error - - PostDiffCalled bool - PostDiffInfo *InstanceInfo - PostDiffDiff *InstanceDiff - PostDiffReturn HookAction - PostDiffError error - - PreProvisionResourceCalled bool - PreProvisionResourceInfo *InstanceInfo - PreProvisionInstanceState *InstanceState - PreProvisionResourceReturn HookAction - PreProvisionResourceError error - - PostProvisionResourceCalled bool - PostProvisionResourceInfo *InstanceInfo - PostProvisionInstanceState *InstanceState - PostProvisionResourceReturn HookAction - PostProvisionResourceError error - - PreProvisionCalled bool - PreProvisionInfo *InstanceInfo - PreProvisionProvisionerId string - PreProvisionReturn HookAction - PreProvisionError error - - PostProvisionCalled bool - PostProvisionInfo *InstanceInfo - PostProvisionProvisionerId string - PostProvisionErrorArg error - PostProvisionReturn HookAction - PostProvisionError error - - 
ProvisionOutputCalled bool - ProvisionOutputInfo *InstanceInfo - ProvisionOutputProvisionerId string - ProvisionOutputMessage string - - PostRefreshCalled bool - PostRefreshInfo *InstanceInfo - PostRefreshState *InstanceState - PostRefreshReturn HookAction - PostRefreshError error - - PreRefreshCalled bool - PreRefreshInfo *InstanceInfo - PreRefreshState *InstanceState - PreRefreshReturn HookAction - PreRefreshError error + PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error) + + PreDiffCalled bool + PreDiffAddr addrs.AbsResourceInstance + PreDiffGen states.Generation + PreDiffPriorState cty.Value + PreDiffProposedState cty.Value + PreDiffReturn HookAction + PreDiffError error + + PostDiffCalled bool + PostDiffAddr addrs.AbsResourceInstance + PostDiffGen states.Generation + PostDiffAction plans.Action + PostDiffPriorState cty.Value + PostDiffPlannedState cty.Value + PostDiffReturn HookAction + PostDiffError error + + PreProvisionInstanceCalled bool + PreProvisionInstanceAddr addrs.AbsResourceInstance + PreProvisionInstanceState cty.Value + PreProvisionInstanceReturn HookAction + PreProvisionInstanceError error + + PostProvisionInstanceCalled bool + PostProvisionInstanceAddr addrs.AbsResourceInstance + PostProvisionInstanceState cty.Value + PostProvisionInstanceReturn HookAction + PostProvisionInstanceError error + + PreProvisionInstanceStepCalled bool + PreProvisionInstanceStepAddr addrs.AbsResourceInstance + PreProvisionInstanceStepProvisionerType string + PreProvisionInstanceStepReturn HookAction + PreProvisionInstanceStepError error + + PostProvisionInstanceStepCalled bool + PostProvisionInstanceStepAddr addrs.AbsResourceInstance + PostProvisionInstanceStepProvisionerType string + PostProvisionInstanceStepErrorArg error + PostProvisionInstanceStepReturn HookAction + PostProvisionInstanceStepError error + + ProvisionOutputCalled bool + ProvisionOutputAddr addrs.AbsResourceInstance + ProvisionOutputProvisionerType string + ProvisionOutputMessage string + + PreRefreshCalled bool + PreRefreshAddr addrs.AbsResourceInstance + PreRefreshGen states.Generation + PreRefreshPriorState cty.Value + PreRefreshReturn HookAction + PreRefreshError error + + PostRefreshCalled bool + PostRefreshAddr addrs.AbsResourceInstance + PostRefreshGen states.Generation + PostRefreshPriorState cty.Value + PostRefreshNewState cty.Value + PostRefreshReturn HookAction + PostRefreshError error PreImportStateCalled bool - PreImportStateInfo *InstanceInfo - PreImportStateId string + PreImportStateAddr addrs.AbsResourceInstance + PreImportStateID string PreImportStateReturn HookAction PreImportStateError error - PostImportStateCalled bool - PostImportStateInfo *InstanceInfo - PostImportStateState []*InstanceState - PostImportStateReturn HookAction - PostImportStateError error + PostImportStateCalled bool + PostImportStateAddr addrs.AbsResourceInstance + PostImportStateNewStates []providers.ImportedResource + PostImportStateReturn HookAction + PostImportStateError error PostStateUpdateCalled bool - PostStateUpdateState *State + PostStateUpdateState *states.State PostStateUpdateReturn HookAction PostStateUpdateError error } -func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) { +var _ Hook = (*MockHook)(nil) + +func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() h.PreApplyCalled = true - 
h.PreApplyInfo = n - h.PreApplyDiff = d - h.PreApplyState = s + h.PreApplyAddr = addr + h.PreApplyGen = gen + h.PreApplyAction = action + h.PreApplyPriorState = priorState + h.PreApplyPlannedState = plannedNewState return h.PreApplyReturn, h.PreApplyError } -func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) { +func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { h.Lock() defer h.Unlock() h.PostApplyCalled = true - h.PostApplyInfo = n - h.PostApplyState = s - h.PostApplyError = e + h.PostApplyAddr = addr + h.PostApplyGen = gen + h.PostApplyNewState = newState + h.PostApplyError = err if h.PostApplyFn != nil { - return h.PostApplyFn(n, s, e) + return h.PostApplyFn(addr, gen, newState, err) } return h.PostApplyReturn, h.PostApplyReturnError } -func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) { +func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() h.PreDiffCalled = true - h.PreDiffInfo = n - h.PreDiffState = s + h.PreDiffAddr = addr + h.PreDiffGen = gen + h.PreDiffPriorState = priorState + h.PreDiffProposedState = proposedNewState return h.PreDiffReturn, h.PreDiffError } -func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) { +func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() h.PostDiffCalled = true - h.PostDiffInfo = n - h.PostDiffDiff = d + h.PostDiffAddr = addr + h.PostDiffGen = gen + h.PostDiffAction = action + h.PostDiffPriorState = priorState + h.PostDiffPlannedState = plannedNewState return h.PostDiffReturn, h.PostDiffError } -func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { +func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() - h.PreProvisionResourceCalled = true - h.PreProvisionResourceInfo = n - h.PreProvisionInstanceState = s - return h.PreProvisionResourceReturn, h.PreProvisionResourceError + h.PreProvisionInstanceCalled = true + h.PreProvisionInstanceAddr = addr + h.PreProvisionInstanceState = state + return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError } -func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { +func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() - h.PostProvisionResourceCalled = true - h.PostProvisionResourceInfo = n - h.PostProvisionInstanceState = s - return h.PostProvisionResourceReturn, h.PostProvisionResourceError + h.PostProvisionInstanceCalled = true + h.PostProvisionInstanceAddr = addr + h.PostProvisionInstanceState = state + return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError } -func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) { +func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { h.Lock() defer h.Unlock() - h.PreProvisionCalled = true - h.PreProvisionInfo = n - h.PreProvisionProvisionerId = provId - return h.PreProvisionReturn, h.PreProvisionError + h.PreProvisionInstanceStepCalled = true + h.PreProvisionInstanceStepAddr = addr + 
h.PreProvisionInstanceStepProvisionerType = typeName + return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError } -func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) { +func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { h.Lock() defer h.Unlock() - h.PostProvisionCalled = true - h.PostProvisionInfo = n - h.PostProvisionProvisionerId = provId - h.PostProvisionErrorArg = err - return h.PostProvisionReturn, h.PostProvisionError + h.PostProvisionInstanceStepCalled = true + h.PostProvisionInstanceStepAddr = addr + h.PostProvisionInstanceStepProvisionerType = typeName + h.PostProvisionInstanceStepErrorArg = err + return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError } -func (h *MockHook) ProvisionOutput( - n *InstanceInfo, - provId string, - msg string) { +func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { h.Lock() defer h.Unlock() h.ProvisionOutputCalled = true - h.ProvisionOutputInfo = n - h.ProvisionOutputProvisionerId = provId - h.ProvisionOutputMessage = msg + h.ProvisionOutputAddr = addr + h.ProvisionOutputProvisionerType = typeName + h.ProvisionOutputMessage = line } -func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { +func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() h.PreRefreshCalled = true - h.PreRefreshInfo = n - h.PreRefreshState = s + h.PreRefreshAddr = addr + h.PreRefreshGen = gen + h.PreRefreshPriorState = priorState return h.PreRefreshReturn, h.PreRefreshError } -func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { +func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() h.PostRefreshCalled = true - h.PostRefreshInfo = n - h.PostRefreshState = s + h.PostRefreshAddr = addr + h.PostRefreshPriorState = priorState + h.PostRefreshNewState = newState return h.PostRefreshReturn, h.PostRefreshError } -func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) { +func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { h.Lock() defer h.Unlock() h.PreImportStateCalled = true - h.PreImportStateInfo = info - h.PreImportStateId = id + h.PreImportStateAddr = addr + h.PreImportStateID = importID return h.PreImportStateReturn, h.PreImportStateError } -func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) { +func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { h.Lock() defer h.Unlock() h.PostImportStateCalled = true - h.PostImportStateInfo = info - h.PostImportStateState = s + h.PostImportStateAddr = addr + h.PostImportStateNewStates = imported return h.PostImportStateReturn, h.PostImportStateError } -func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) { +func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) { h.Lock() defer h.Unlock() h.PostStateUpdateCalled = true - h.PostStateUpdateState = s + h.PostStateUpdateState = new return h.PostStateUpdateReturn, h.PostStateUpdateError } diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go 
b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go index 104d0098a1..811fb337cc 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go +++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go @@ -2,6 +2,13 @@ package terraform import ( "sync/atomic" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" ) // stopHook is a private Hook implementation that Terraform uses to @@ -10,63 +17,69 @@ type stopHook struct { stop uint32 } -func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { +var _ Hook = (*stopHook)(nil) + +func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { +func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { return h.hook() } -func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { +func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { +func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { +func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { +func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) { +func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { return h.hook() } -func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { +func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { return h.hook() } -func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) { +func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { } -func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { +func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { +func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) { +func (h *stopHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { return h.hook() } -func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { +func (h 
*stopHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { return h.hook() } -func (h *stopHook) PostStateUpdate(*State) (HookAction, error) { +func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) { return h.hook() } func (h *stopHook) hook() (HookAction, error) { if h.Stopped() { + // FIXME: This should really return an error since stopping partway + // through is not a successful run-to-completion, but we'll need to + // introduce that cautiously since existing automation solutions may + // be depending on this behavior. return HookActionHalt, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go index 4f4e178cf3..26c185751a 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go @@ -45,65 +45,7 @@ type InterpolationScope struct { func (i *Interpolater) Values( scope *InterpolationScope, vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) { - if scope == nil { - scope = &InterpolationScope{} - } - - result := make(map[string]ast.Variable, len(vars)) - - // Copy the default variables - if i.Module != nil && scope != nil { - mod := i.Module - if len(scope.Path) > 1 { - mod = i.Module.Child(scope.Path[1:]) - } - for _, v := range mod.Config().Variables { - // Set default variables - if v.Default == nil { - continue - } - - n := fmt.Sprintf("var.%s", v.Name) - variable, err := hil.InterfaceToVariable(v.Default) - if err != nil { - return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default) - } - - result[n] = variable - } - } - - for n, rawV := range vars { - var err error - switch v := rawV.(type) { - case *config.CountVariable: - err = i.valueCountVar(scope, n, v, result) - case *config.ModuleVariable: - err = i.valueModuleVar(scope, n, v, result) - case *config.PathVariable: - err = i.valuePathVar(scope, n, v, result) - case *config.ResourceVariable: - err = i.valueResourceVar(scope, n, v, result) - case *config.SelfVariable: - err = i.valueSelfVar(scope, n, v, result) - case *config.SimpleVariable: - err = i.valueSimpleVar(scope, n, v, result) - case *config.TerraformVariable: - err = i.valueTerraformVar(scope, n, v, result) - case *config.LocalVariable: - err = i.valueLocalVar(scope, n, v, result) - case *config.UserVariable: - err = i.valueUserVar(scope, n, v, result) - default: - err = fmt.Errorf("%s: unknown variable type: %T", n, rawV) - } - - if err != nil { - return nil, err - } - } - - return result, nil + return nil, fmt.Errorf("type Interpolater is no longer supported; use the evaluator API instead") } func (i *Interpolater) valueCountVar( @@ -153,7 +95,7 @@ func (i *Interpolater) valueModuleVar( defer i.StateLock.RUnlock() // Get the module where we're looking for the value - mod := i.State.ModuleByPath(path) + mod := i.State.ModuleByPath(normalizeModulePath(path)) if mod == nil { // If the module doesn't exist, then we can return an empty string. // This happens usually only in Refresh() when we haven't populated @@ -257,13 +199,13 @@ func (i *Interpolater) valueResourceVar( } if variable == nil { - // During the input walk we tolerate missing variables because + // During the refresh walk we tolerate missing variables because // we haven't yet had a chance to refresh state, so dynamic data may // not yet be complete.
// If it truly is missing, we'll catch it on a later walk. // This applies only to graph nodes that interpolate during the - // config walk, e.g. providers. - if i.Operation == walkInput || i.Operation == walkRefresh { + // refresh walk, e.g. providers. + if i.Operation == walkRefresh { result[n] = unknownVariable() return nil } @@ -365,7 +307,7 @@ func (i *Interpolater) valueLocalVar( } // Get the relevant module - module := i.State.ModuleByPath(scope.Path) + module := i.State.ModuleByPath(normalizeModulePath(scope.Path)) if module == nil { result[n] = unknownVariable() return nil @@ -584,10 +526,7 @@ MISSING: // // For a Destroy, we're also fine with computed values, since our goal is // only to get destroy nodes for existing resources. - // - // For an input walk, computed values are okay to return because we're only - // looking for missing variables to prompt the user for. - if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput { + if i.Operation == walkRefresh || i.Operation == walkPlanDestroy { return &unknownVariable, nil } @@ -607,13 +546,6 @@ func (i *Interpolater) computeResourceMultiVariable( unknownVariable := unknownVariable() - // If we're only looking for input, we don't need to expand a - // multi-variable. This prevents us from encountering things that should be - // known but aren't because the state has yet to be refreshed. - if i.Operation == walkInput { - return &unknownVariable, nil - } - // Get the information about this resource variable, and verify // that it exists and such. module, cr, err := i.resourceVariableInfo(scope, v) @@ -695,7 +627,7 @@ func (i *Interpolater) computeResourceMultiVariable( // // For an input walk, computed values are okay to return because we're only // looking for missing variables to prompt the user for. - if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput { + if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy { return &unknownVariable, nil } @@ -776,7 +708,7 @@ func (i *Interpolater) resourceVariableInfo( } // Get the relevant module - module := i.State.ModuleByPath(scope.Path) + module := i.State.ModuleByPath(normalizeModulePath(scope.Path)) return module, cr, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go index 4594cb6033..66a68c7de8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go +++ b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go @@ -1,84 +1,135 @@ package terraform import ( - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/moduledeps" "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/states" ) -// ModuleTreeDependencies returns the dependencies of the tree of modules -// described by the given configuration tree and state. +// ConfigTreeDependencies returns the dependencies of the tree of modules +// described by the given configuration and state. // // Both configuration and state are required because there can be resources // implied by instances in the state that no longer exist in config. 
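A hedged aside on the function documented above: the tree that ConfigTreeDependencies returns can be walked using only the moduledeps field names visible in this patch (Name, Providers, Children). printDeps below is an illustrative helper, not part of the vendored code.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/moduledeps"
)

// printDeps walks a dependency tree depth-first and prints each module's
// provider requirements.
func printDeps(m *moduledeps.Module, indent string) {
	fmt.Printf("%smodule %q\n", indent, m.Name)
	for inst, dep := range m.Providers {
		// Constraints and Reason are the ProviderDependency fields set by
		// configTreeConfigDependencies below.
		fmt.Printf("%s  provider %s: %v (%v)\n", indent, inst, dep.Constraints, dep.Reason)
	}
	for _, child := range m.Children {
		printDeps(child, indent+"  ")
	}
}

func main() {
	printDeps(&moduledeps.Module{Name: "root"}, "")
}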
-// -// This function will panic if any invalid version constraint strings are -// present in the configuration. This is guaranteed not to happen for any -// configuration that has passed a call to Config.Validate(). -func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module { +func ConfigTreeDependencies(root *configs.Config, state *states.State) *moduledeps.Module { // First we walk the configuration tree to build the overall structure // and capture the explicit/implicit/inherited provider dependencies. - deps := moduleTreeConfigDependencies(root, nil) + deps := configTreeConfigDependencies(root, nil) // Next we walk over the resources in the state to catch any additional // dependencies created by existing resources that are no longer in config. // Most things we find in state will already be present in 'deps', but // we're interested in the rare thing that isn't. - moduleTreeMergeStateDependencies(deps, state) + configTreeMergeStateDependencies(deps, state) return deps } -func moduleTreeConfigDependencies(root *module.Tree, inheritProviders map[string]*config.ProviderConfig) *moduledeps.Module { +func configTreeConfigDependencies(root *configs.Config, inheritProviders map[string]*configs.Provider) *moduledeps.Module { if root == nil { // If no config is provided, we'll make a synthetic root. // This isn't necessarily correct if we're called with a nil that // *isn't* at the root, but in practice that can never happen. return &moduledeps.Module{ - Name: "root", + Name: "root", + Providers: make(moduledeps.Providers), } } + name := "root" + if len(root.Path) != 0 { + name = root.Path[len(root.Path)-1] + } + ret := &moduledeps.Module{ - Name: root.Name(), + Name: name, } - cfg := root.Config() - providerConfigs := cfg.ProviderConfigsByFullName() + module := root.Module // Provider dependencies { - providers := make(moduledeps.Providers, len(providerConfigs)) + providers := make(moduledeps.Providers) - // Any providerConfigs elements are *explicit* provider dependencies, - // which is the only situation where the user might provide an actual - // version constraint. We'll take care of these first. - for fullName, pCfg := range providerConfigs { + // The main way to declare a provider dependency is explicitly inside + // the "terraform" block, which allows declaring a requirement without + // also creating a configuration. + for fullName, constraints := range module.ProviderRequirements { inst := moduledeps.ProviderInstance(fullName) - versionSet := discovery.AllVersions - if pCfg.Version != "" { - versionSet = discovery.ConstraintStr(pCfg.Version).MustParse() + + // The handling here is a bit fiddly because the moduledeps package + // was designed around the legacy (pre-0.12) configuration model + // and hasn't yet been revised to handle the new model. As a result, + // we need to do some translation here. + // FIXME: Eventually we should adjust the underlying model so we + // can also retain the source location of each constraint, for + // more informative output from the "terraform providers" command. + var rawConstraints version.Constraints + for _, constraint := range constraints { + rawConstraints = append(rawConstraints, constraint.Required...) 
} + discoConstraints := discovery.NewConstraints(rawConstraints) + providers[inst] = moduledeps.ProviderDependency{ - Constraints: versionSet, + Constraints: discoConstraints, Reason: moduledeps.ProviderDependencyExplicit, } } + // Provider configurations can also include version constraints, + // allowing for more terse declaration in situations where both a + // configuration and a constraint are defined in the same module. + for fullName, pCfg := range module.ProviderConfigs { + inst := moduledeps.ProviderInstance(fullName) + discoConstraints := discovery.AllVersions + if pCfg.Version.Required != nil { + discoConstraints = discovery.NewConstraints(pCfg.Version.Required) + } + if existing, exists := providers[inst]; exists { + existing.Constraints = existing.Constraints.Append(discoConstraints) + } else { + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discoConstraints, + Reason: moduledeps.ProviderDependencyExplicit, + } + } + } + // Each resource in the configuration creates an *implicit* provider // dependency, though we'll only record it if there isn't already // an explicit dependency on the same provider. - for _, rc := range cfg.Resources { - fullName := rc.ProviderFullName() - inst := moduledeps.ProviderInstance(fullName) + for _, rc := range module.ManagedResources { + addr := rc.ProviderConfigAddr() + inst := moduledeps.ProviderInstance(addr.StringCompact()) + if _, exists := providers[inst]; exists { + // Explicit dependency already present + continue + } + + reason := moduledeps.ProviderDependencyImplicit + if _, inherited := inheritProviders[addr.StringCompact()]; inherited { + reason = moduledeps.ProviderDependencyInherited + } + + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.AllVersions, + Reason: reason, + } + } + for _, rc := range module.DataResources { + addr := rc.ProviderConfigAddr() + inst := moduledeps.ProviderInstance(addr.StringCompact()) if _, exists := providers[inst]; exists { // Explicit dependency already present continue } reason := moduledeps.ProviderDependencyImplicit - if _, inherited := inheritProviders[fullName]; inherited { + if _, inherited := inheritProviders[addr.String()]; inherited { reason = moduledeps.ProviderDependencyInherited } @@ -91,31 +142,31 @@ func moduleTreeConfigDependencies(root *module.Tree, inheritProviders map[string ret.Providers = providers } - childInherit := make(map[string]*config.ProviderConfig) + childInherit := make(map[string]*configs.Provider) for k, v := range inheritProviders { childInherit[k] = v } - for k, v := range providerConfigs { + for k, v := range module.ProviderConfigs { childInherit[k] = v } - for _, c := range root.Children() { - ret.Children = append(ret.Children, moduleTreeConfigDependencies(c, childInherit)) + for _, c := range root.Children { + ret.Children = append(ret.Children, configTreeConfigDependencies(c, childInherit)) } return ret } -func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) { +func configTreeMergeStateDependencies(root *moduledeps.Module, state *states.State) { if state == nil { return } - findModule := func(path []string) *moduledeps.Module { + findModule := func(path addrs.ModuleInstance) *moduledeps.Module { module := root - for _, name := range path[1:] { // skip initial "root" + for _, step := range path { var next *moduledeps.Module for _, cm := range module.Children { - if cm.Name == name { + if cm.Name == step.Name { next = cm break } @@ -124,7 +175,8 @@ func moduleTreeMergeStateDependencies(root 
*moduledeps.Module, state *State) { if next == nil { // If we didn't find a next node, we'll need to make one next = &moduledeps.Module{ - Name: name, + Name: step.Name, + Providers: make(moduledeps.Providers), } module.Children = append(module.Children, next) } @@ -135,15 +187,11 @@ func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) { } for _, ms := range state.Modules { - module := findModule(ms.Path) + module := findModule(ms.Addr) - for _, is := range ms.Resources { - fullName := config.ResourceProviderFullName(is.Type, is.Provider) - inst := moduledeps.ProviderInstance(fullName) + for _, rs := range ms.Resources { + inst := moduledeps.ProviderInstance(rs.ProviderConfig.ProviderConfig.StringCompact()) if _, exists := module.Providers[inst]; !exists { - if module.Providers == nil { - module.Providers = make(moduledeps.Providers) - } module.Providers[inst] = moduledeps.ProviderDependency{ Constraints: discovery.AllVersions, Reason: moduledeps.ProviderDependencyFromState, @@ -151,5 +199,4 @@ func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) { } } } - } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go index bd32c79f34..e4952039c0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go @@ -1,14 +1,22 @@ package terraform -// NodeCountBoundary fixes any "count boundarie" in the state: resources -// that are named "foo.0" when they should be named "foo" -type NodeCountBoundary struct{} +import ( + "github.com/hashicorp/terraform/configs" +) + +// NodeCountBoundary fixes up any transitions between "each modes" in objects +// saved in state, such as switching from NoEach to EachInt. +type NodeCountBoundary struct { + Config *configs.Config +} func (n *NodeCountBoundary) Name() string { - return "meta.count-boundary (count boundary fixup)" + return "meta.count-boundary (EachMode fixup)" } // GraphNodeEvalable func (n *NodeCountBoundary) EvalTree() EvalNode { - return &EvalCountFixZeroOneBoundaryGlobal{} + return &EvalCountFixZeroOneBoundaryGlobal{ + Config: n.Config, + } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go index e32cea8825..6ba39904d6 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go @@ -1,22 +1,40 @@ package terraform -// NodeDestroyableDataResource represents a resource that is "plannable": -// it is ready to be planned in order to create a diff. -type NodeDestroyableDataResource struct { - *NodeAbstractResource +import ( + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) + +// NodeDestroyableDataResourceInstance represents a resource that is "destroyable": +// it is ready to be destroyed. 
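For reference, a hedged sketch of how the two provider-constraint sources handled in module_dependencies.go above combine, assuming hashicorp/go-version's NewConstraint and the discovery helpers used in this patch; the version strings are illustrative only.

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"

	"github.com/hashicorp/terraform/plugin/discovery"
)

func main() {
	// One constraint from a "terraform" block requirement and one from a
	// provider configuration's version argument (both hypothetical).
	fromRequirements, _ := version.NewConstraint(">= 1.0.0")
	fromProviderCfg, _ := version.NewConstraint("< 2.0.0")

	merged := discovery.NewConstraints(fromRequirements).
		Append(discovery.NewConstraints(fromProviderCfg))

	// Both constraints must now hold when a provider version is selected.
	fmt.Println(merged)
}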
+type NodeDestroyableDataResourceInstance struct { + *NodeAbstractResourceInstance } // GraphNodeEvalable -func (n *NodeDestroyableDataResource) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr +func (n *NodeDestroyableDataResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() - // stateId is the ID to put into the state - stateId := addr.stateId() + var providerSchema *ProviderSchema + // We don't need the provider, but we're calling EvalGetProvider to load the + // schema. + var provider providers.Interface // Just destroy it. - var state *InstanceState - return &EvalWriteState{ - Name: stateId, - State: &state, // state is nil here + var state *states.ResourceInstanceObject + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalWriteState{ + Addr: addr.Resource, + State: &state, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + }, + }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go index d5ca641a6f..a086214d4e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go @@ -2,46 +2,66 @@ package terraform import ( "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" ) -// NodeRefreshableDataResource represents a resource that is "plannable": -// it is ready to be planned in order to create a diff. +// NodeRefreshableDataResource represents a resource that is "refreshable". type NodeRefreshableDataResource struct { - *NodeAbstractCountResource + *NodeAbstractResource } +var ( + _ GraphNodeSubPath = (*NodeRefreshableDataResource)(nil) + _ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil) + _ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil) + _ GraphNodeResource = (*NodeRefreshableDataResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil) +) + // GraphNodeDynamicExpandable func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count, err := n.Config.Count() - if err != nil { - return nil, err + var diags tfdiags.Diagnostics + + count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() } + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. 
+ state := ctx.State().Lock() + defer ctx.State().Unlock() + // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config a.ResolvedProvider = n.ResolvedProvider return &NodeRefreshableDataResourceInstance{ - NodeAbstractResource: a, + NodeAbstractResourceInstance: a, } } // We also need a destroyable resource for orphans that are a result of a // scaled-in count. - concreteResourceDestroyable := func(a *NodeAbstractResource) dag.Vertex { - // Add the config since we don't do that via transforms + concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and provider since we don't do that via transforms a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider - return &NodeDestroyableDataResource{ - NodeAbstractResource: a, + return &NodeDestroyableDataResourceInstance{ + NodeAbstractResourceInstance: a, } } @@ -50,6 +70,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er // Expand the count. &ResourceCountTransformer{ Concrete: concreteResource, + Schema: n.Schema, Count: count, Addr: n.ResourceAddr(), }, @@ -67,7 +88,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er &AttachStateTransformer{State: state}, // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, + &TargetsTransformer{Targets: n.Targets}, // Connect references so ordering is correct &ReferenceTransformer{}, @@ -83,139 +104,118 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er Name: "NodeRefreshableDataResource", } - return b.Build(ctx.Path()) + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() } -// NodeRefreshableDataResourceInstance represents a _single_ resource instance +// NodeRefreshableDataResourceInstance represents a single resource instance // that is refreshable. type NodeRefreshableDataResourceInstance struct { - *NodeAbstractResource + *NodeAbstractResourceInstance } // GraphNodeEvalable func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - } + addr := n.ResourceInstanceAddr() - // Get the state if we have it, if not we build it - rs := n.ResourceState - if rs == nil { - rs = &ResourceState{ - Provider: n.ResolvedProvider, - } - } - - // If the config isn't empty we update the state - if n.Config != nil { - rs = &ResourceState{ - Type: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: n.StateReferences(), - } - } - - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var config *ResourceConfig - var diff *InstanceDiff - var provider ResourceProvider - var state *InstanceState + // These variables are the state for the eval sequence below, and are + // updated through pointers. 
+ var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var configVal cty.Value return &EvalSequence{ Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + // Always destroy the existing state first, since we must // make sure that values from a previous read will not // get interpolated if we end up needing to defer our // loading until apply time. &EvalWriteState{ - Name: stateId, - ResourceType: rs.Type, - Provider: n.ResolvedProvider, - Dependencies: rs.Dependencies, - State: &state, // state is nil here - }, - - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, // a pointer to nil, here + ProviderSchema: &providerSchema, }, - // The rest of this pass can proceed only if there are no - // computed values in our config. - // (If there are, we'll deal with this during the plan and - // apply phases.) &EvalIf{ If: func(ctx EvalContext) (bool, error) { - if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 { - return true, EvalEarlyExitError{} - } - // If the config explicitly has a depends_on for this // data source, assume the intention is to prevent - // refreshing ahead of that dependency. + // refreshing ahead of that dependency, and therefore + // we need to deal with this resource during the apply + // phase. if len(n.Config.DependsOn) > 0 { return true, EvalEarlyExitError{} } return true, nil }, - Then: EvalNoop{}, }, - // The remainder of this pass is the same as running - // a "plan" pass immediately followed by an "apply" pass, - // populating the state early so it'll be available to - // provider configurations that need this data during - // refresh/plan. - &EvalGetProvider{ - Name: n.ResolvedProvider, - Output: &provider, + // EvalReadData will _attempt_ to read the data source, but may + // generate an incomplete planned object if the configuration + // includes values that won't be known until apply. + &EvalReadData{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + OutputChange: &change, + OutputConfigValue: &configVal, + OutputState: &state, }, - &EvalReadDataDiff{ - Info: info, - Config: &config, - Provider: &provider, - Output: &diff, - OutputState: &state, - }, - - &EvalReadDataApply{ - Info: info, - Diff: &diff, - Provider: &provider, - Output: &state, - }, - - &EvalWriteState{ - Name: stateId, - ResourceType: rs.Type, - Provider: n.ResolvedProvider, - Dependencies: rs.Dependencies, - State: &state, + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return (*state).Status != states.ObjectPlanned, nil + }, + Then: &EvalSequence{ + Nodes: []EvalNode{ + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + ProviderSchema: &providerSchema, + }, + &EvalUpdateStateHook{}, + }, + }, + Else: &EvalSequence{ + // We can't deal with this yet, so we'll repeat this step + // during the plan walk to produce a planned change to read + // this during the apply walk. However, we do still need to + // save the generated change and partial state so that + // results from it can be included in other data resources + // or provider configurations during the refresh walk.
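The EvalIf above keys off the saved object's status. A hedged one-line summary of that predicate, using states.ObjectPlanned as it appears in this patch; the helper name is illustrative and not part of the vendored file.

package terraform

import "github.com/hashicorp/terraform/states"

// deferredToApply reports whether a data source read produced only a
// planned (incomplete) object during refresh, meaning the real read must
// be repeated during the apply walk.
func deferredToApply(obj *states.ResourceInstanceObject) bool {
	return obj != nil && obj.Status == states.ObjectPlanned
}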
+ // (The planned object we save in the state here will be + // pruned out at the end of the refresh walk, returning + // it back to being unset again for subsequent walks.) + Nodes: []EvalNode{ + &EvalWriteDiff{ + Addr: addr.Resource, + Change: &change, + ProviderSchema: &providerSchema, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + ProviderSchema: &providerSchema, + }, + }, + }, }, - - &EvalUpdateStateHook{}, }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_local.go b/vendor/github.com/hashicorp/terraform/terraform/node_local.go index d38722267e..591eb305a6 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_local.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_local.go @@ -1,10 +1,10 @@ package terraform import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/lang" ) // NodeLocal represents a named local value in a particular module. @@ -12,22 +12,26 @@ import ( // Local value nodes only have one operation, common to all walk types: // evaluate the result and place it in state. type NodeLocal struct { - PathValue []string - Config *config.Local + Addr addrs.AbsLocalValue + Config *configs.Local } -func (n *NodeLocal) Name() string { - result := fmt.Sprintf("local.%s", n.Config.Name) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } +var ( + _ GraphNodeSubPath = (*NodeLocal)(nil) + _ RemovableIfNotTargeted = (*NodeLocal)(nil) + _ GraphNodeReferenceable = (*NodeLocal)(nil) + _ GraphNodeReferencer = (*NodeLocal)(nil) + _ GraphNodeEvalable = (*NodeLocal)(nil) + _ dag.GraphNodeDotter = (*NodeLocal)(nil) +) - return result +func (n *NodeLocal) Name() string { + return n.Addr.String() } // GraphNodeSubPath -func (n *NodeLocal) Path() []string { - return n.PathValue +func (n *NodeLocal) Path() addrs.ModuleInstance { + return n.Addr.Module } // RemovableIfNotTargeted @@ -36,31 +40,31 @@ func (n *NodeLocal) RemoveIfNotTargeted() bool { } // GraphNodeReferenceable -func (n *NodeLocal) ReferenceableName() []string { - name := fmt.Sprintf("local.%s", n.Config.Name) - return []string{name} +func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.LocalValue} } // GraphNodeReferencer -func (n *NodeLocal) References() []string { - var result []string - result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) - for _, v := range result { - split := strings.Split(v, "/") - for i, s := range split { - split[i] = s + ".destroy" - } - - result = append(result, strings.Join(split, "/")) - } - - return result +func (n *NodeLocal) References() []*addrs.Reference { + refs, _ := lang.ReferencesInExpr(n.Config.Expr) + return appendResourceDestroyReferences(refs) } // GraphNodeEvalable func (n *NodeLocal) EvalTree() EvalNode { return &EvalLocal{ - Name: n.Config.Name, - Value: n.Config.RawConfig, + Addr: n.Addr.LocalValue, + Expr: n.Config.Expr, + } +} + +// dag.GraphNodeDotter impl. 
+func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go index bb3e5ee1e0..cb55a1a88f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go @@ -2,76 +2,80 @@ package terraform import ( "fmt" - "log" - "reflect" + + "github.com/hashicorp/terraform/addrs" ) // NodeModuleRemoved represents a module that is no longer in the // config. type NodeModuleRemoved struct { - PathValue []string + Addr addrs.ModuleInstance } +var ( + _ GraphNodeSubPath = (*NodeModuleRemoved)(nil) + _ GraphNodeEvalable = (*NodeModuleRemoved)(nil) + _ GraphNodeReferencer = (*NodeModuleRemoved)(nil) + _ GraphNodeReferenceOutside = (*NodeModuleRemoved)(nil) +) + func (n *NodeModuleRemoved) Name() string { - return fmt.Sprintf("%s (removed)", modulePrefixStr(n.PathValue)) + return fmt.Sprintf("%s (removed)", n.Addr.String()) } // GraphNodeSubPath -func (n *NodeModuleRemoved) Path() []string { - return n.PathValue +func (n *NodeModuleRemoved) Path() addrs.ModuleInstance { + return n.Addr } // GraphNodeEvalable func (n *NodeModuleRemoved) EvalTree() EvalNode { return &EvalOpFilter{ Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, - Node: &EvalDeleteModule{ - PathValue: n.PathValue, + Node: &EvalCheckModuleRemoved{ + Addr: n.Addr, }, } } -func (n *NodeModuleRemoved) ReferenceGlobal() bool { - return true +func (n *NodeModuleRemoved) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + // Our "References" implementation indicates that this node depends on + // the call to the module it represents, which implicitly depends on + // everything inside the module. That reference must therefore be + // interpreted in terms of our parent module. + return n.Addr, n.Addr.Parent() } -func (n *NodeModuleRemoved) References() []string { - return []string{modulePrefixStr(n.PathValue)} -} +func (n *NodeModuleRemoved) References() []*addrs.Reference { + // We depend on the call to the module we represent, because that + // implicitly then depends on everything inside that module. + // Our ReferenceOutside implementation causes this to be interpreted + // within the parent module. -// EvalDeleteModule is an EvalNode implementation that removes an empty module -// entry from the state. -type EvalDeleteModule struct { - PathValue []string -} + _, call := n.Addr.CallInstance() + return []*addrs.Reference{ + { + Subject: call, -func (n *EvalDeleteModule) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() - if state == nil { - return nil, nil + // No source range here, because there's nothing reasonable for + // us to return. + }, } +} - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - - // Make sure we have a clean state - // Destroyed resources aren't deleted, they're written with an ID of "". - state.prune() +// EvalCheckModuleRemoved is an EvalNode implementation that verifies that +// a module has been removed from the state as expected. 
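EvalCheckModuleRemoved below follows the EvalNode contract used throughout this package: Eval receives an EvalContext and returns an optional value and an error. A hedged, minimal illustration of that contract (EvalLogAddr is hypothetical):

package terraform

import (
	"log"

	"github.com/hashicorp/terraform/addrs"
)

// EvalLogAddr is an illustrative EvalNode that just logs the module
// instance it visits; EvalSequence and EvalOpFilter compose nodes with
// exactly this Eval signature.
type EvalLogAddr struct {
	Addr addrs.ModuleInstance
}

func (n *EvalLogAddr) Eval(ctx EvalContext) (interface{}, error) {
	log.Printf("[TRACE] visiting %s", n.Addr)
	return nil, nil
}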
+type EvalCheckModuleRemoved struct { + Addr addrs.ModuleInstance +} - // find the module and delete it - for i, m := range state.Modules { - if reflect.DeepEqual(m.Path, n.PathValue) { - if !m.Empty() { - // a targeted apply may leave module resources even without a config, - // so just log this and return. - log.Printf("[DEBUG] cannot remove module %s, not empty", modulePrefixStr(n.PathValue)) - break - } - state.Modules = append(state.Modules[:i], state.Modules[i+1:]...) - break - } +func (n *EvalCheckModuleRemoved) Eval(ctx EvalContext) (interface{}, error) { + mod := ctx.State().Module(n.Addr) + if mod != nil { + // If we get here then that indicates a bug either in the states + // module or in an earlier step of the graph walk, since we should've + // pruned out the module when the last resource was removed from it. + return nil, fmt.Errorf("leftover module %s in state that should have been removed; this is a bug in Terraform and should be reported", n.Addr) } - return nil, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go index 66ff7d5e3b..aca5a6a3fd 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go @@ -1,40 +1,43 @@ package terraform import ( - "fmt" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/lang" + "github.com/zclconf/go-cty/cty" ) // NodeApplyableModuleVariable represents a module variable input during // the apply step. type NodeApplyableModuleVariable struct { - PathValue []string - Config *config.Variable // Config is the var in the config - Value *config.RawConfig // Value is the value that is set - - Module *module.Tree // Antiquated, want to remove + Addr addrs.AbsInputVariableInstance + Config *configs.Variable // Config is the var in the config + Expr hcl.Expression // Expr is the value expression given in the call } -func (n *NodeApplyableModuleVariable) Name() string { - result := fmt.Sprintf("var.%s", n.Config.Name) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } +// Ensure that we are implementing all of the interfaces we think we are +// implementing. +var ( + _ GraphNodeSubPath = (*NodeApplyableModuleVariable)(nil) + _ RemovableIfNotTargeted = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferenceOutside = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferenceable = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferencer = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeEvalable = (*NodeApplyableModuleVariable)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) +) - return result +func (n *NodeApplyableModuleVariable) Name() string { + return n.Addr.String() } // GraphNodeSubPath -func (n *NodeApplyableModuleVariable) Path() []string { - // We execute in the parent scope (above our own module) so that - // we can access the proper interpolations. 
- if len(n.PathValue) > 2 { - return n.PathValue[:len(n.PathValue)-1] - } - - return rootModulePath +func (n *NodeApplyableModuleVariable) Path() addrs.ModuleInstance { + // We execute in the parent scope (above our own module) because + // expressions in our value are resolved in that context. + return n.Addr.Module.Parent() } // RemovableIfNotTargeted @@ -44,95 +47,96 @@ func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool { return true } -// GraphNodeReferenceGlobal -func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool { - // We have to create fully qualified references because we cross - // boundaries here: our ReferenceableName is in one path and our - // References are from another path. - return true +// GraphNodeReferenceOutside implementation +func (n *NodeApplyableModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + + // Module input variables have their value expressions defined in the + // context of their calling (parent) module, and so references from + // a node of this type should be resolved in the parent module instance. + referencePath = n.Addr.Module.Parent() + + // Input variables are _referenced_ from their own module, though. + selfPath = n.Addr.Module + + return // uses named return values } // GraphNodeReferenceable -func (n *NodeApplyableModuleVariable) ReferenceableName() []string { - return []string{n.Name()} +func (n *NodeApplyableModuleVariable) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.Variable} } // GraphNodeReferencer -func (n *NodeApplyableModuleVariable) References() []string { - // If we have no value set, we depend on nothing - if n.Value == nil { - return nil - } +func (n *NodeApplyableModuleVariable) References() []*addrs.Reference { - // Can't depend on anything if we're in the root - if len(n.PathValue) < 2 { + // If we have no value expression, we cannot depend on anything. + if n.Expr == nil { return nil } - // Otherwise, we depend on anything that is in our value, but - // specifically in the namespace of the parent path. - // Create the prefix based on the path - var prefix string - if p := n.Path(); len(p) > 0 { - prefix = modulePrefixStr(p) + // Variables in the root don't depend on anything, because their values + // are gathered prior to the graph walk and recorded in the context. + if len(n.Addr.Module) == 0 { + return nil } - result := ReferencesFromConfig(n.Value) - return modulePrefixList(result, prefix) + // Otherwise, we depend on anything referenced by our value expression. + // We ignore diagnostics here under the assumption that we'll re-eval + // all these things later and catch them then; for our purposes here, + // we only care about valid references. + // + // Due to our GraphNodeReferenceOutside implementation, the addresses + // returned by this function are interpreted in the _parent_ module from + // where our associated variable was declared, which is correct because + // our value expression is assigned within a "module" block in the parent + // module. + refs, _ := lang.ReferencesInExpr(n.Expr) + return refs } // GraphNodeEvalable func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { // If we have no value, do nothing - if n.Value == nil { + if n.Expr == nil { return &EvalNoop{} } // Otherwise, interpolate the value of this variable and set it // within the variables mapping. 
- var config *ResourceConfig - variables := make(map[string]interface{}) + vals := make(map[string]cty.Value) + + _, call := n.Addr.Module.CallInstance() return &EvalSequence{ Nodes: []EvalNode{ - &EvalOpFilter{ - Ops: []walkOperation{walkInput}, - Node: &EvalInterpolate{ - Config: n.Value, - Output: &config, - ContinueOnErr: true, - }, - }, &EvalOpFilter{ Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkValidate}, - Node: &EvalInterpolate{ - Config: n.Value, - Output: &config, - }, - }, + Node: &EvalModuleCallArgument{ + Addr: n.Addr.Variable, + Config: n.Config, + Expr: n.Expr, + Values: vals, - &EvalVariableBlock{ - Config: &config, - VariableValues: variables, - }, - - &EvalCoerceMapVariable{ - Variables: variables, - ModulePath: n.PathValue, - ModuleTree: n.Module, + IgnoreDiagnostics: false, + }, }, - &EvalTypeCheckVariable{ - Variables: variables, - ModulePath: n.PathValue, - ModuleTree: n.Module, + &EvalSetModuleCallArguments{ + Module: call, + Values: vals, }, + }, + } +} - &EvalSetVariables{ - Module: &n.PathValue[len(n.PathValue)-1], - Variables: variables, - }, +// dag.GraphNodeDotter impl. +func (n *NodeApplyableModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go index 83e9925a15..bb3d065311 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go @@ -2,31 +2,38 @@ package terraform import ( "fmt" - "strings" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/lang" ) // NodeApplyableOutput represents an output that is "applyable": // it is ready to be applied. type NodeApplyableOutput struct { - PathValue []string - Config *config.Output // Config is the output in the config + Addr addrs.AbsOutputValue + Config *configs.Output // Config is the output in the config } -func (n *NodeApplyableOutput) Name() string { - result := fmt.Sprintf("output.%s", n.Config.Name) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } +var ( + _ GraphNodeSubPath = (*NodeApplyableOutput)(nil) + _ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil) + _ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil) + _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil) + _ GraphNodeReferencer = (*NodeApplyableOutput)(nil) + _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil) + _ GraphNodeEvalable = (*NodeApplyableOutput)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil) +) - return result +func (n *NodeApplyableOutput) Name() string { + return n.Addr.String() } // GraphNodeSubPath -func (n *NodeApplyableOutput) Path() []string { - return n.PathValue +func (n *NodeApplyableOutput) Path() addrs.ModuleInstance { + return n.Addr.Module } // RemovableIfNotTargeted @@ -44,75 +51,116 @@ func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag return true } +func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.ModuleInstance) { + + // Output values have their expressions resolved in the context of the + // module where they are defined. 
+ referencePath = addr.Module + + // ...but they are referenced in the context of their calling module. + selfPath = addr.Module.Parent() + + return // uses named return values + +} + +// GraphNodeReferenceOutside implementation +func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + return referenceOutsideForOutput(n.Addr) +} + +func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable { + // An output in the root module can't be referenced at all. + if addr.Module.IsRoot() { + return nil + } + + // Otherwise, we can be referenced via a reference to our output name + // on the parent module's call, or via a reference to the entire call. + // e.g. module.foo.bar or just module.foo . + // Note that our ReferenceOutside method causes these addresses to be + // relative to the calling module, not the module where the output + // was declared. + _, outp := addr.ModuleCallOutput() + _, call := addr.Module.CallInstance() + return []addrs.Referenceable{outp, call} + +} + // GraphNodeReferenceable -func (n *NodeApplyableOutput) ReferenceableName() []string { - name := fmt.Sprintf("output.%s", n.Config.Name) - return []string{name} +func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable { + return referenceableAddrsForOutput(n.Addr) } -// GraphNodeReferencer -func (n *NodeApplyableOutput) References() []string { - var result []string - result = append(result, n.Config.DependsOn...) - result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) - for _, v := range result { - split := strings.Split(v, "/") - for i, s := range split { - split[i] = s + ".destroy" - } - - result = append(result, strings.Join(split, "/")) +func referencesForOutput(c *configs.Output) []*addrs.Reference { + impRefs, _ := lang.ReferencesInExpr(c.Expr) + expRefs, _ := lang.References(c.DependsOn) + l := len(impRefs) + len(expRefs) + if l == 0 { + return nil } + refs := make([]*addrs.Reference, 0, l) + refs = append(refs, impRefs...) + refs = append(refs, expRefs...) + return refs - return result +} + +// GraphNodeReferencer +func (n *NodeApplyableOutput) References() []*addrs.Reference { + return appendResourceDestroyReferences(referencesForOutput(n.Config)) } // GraphNodeEvalable func (n *NodeApplyableOutput) EvalTree() EvalNode { return &EvalSequence{ Nodes: []EvalNode{ - &EvalOpFilter{ - // Don't let interpolation errors stop Input, since it happens - // before Refresh. - Ops: []walkOperation{walkInput}, - Node: &EvalWriteOutput{ - Name: n.Config.Name, - Sensitive: n.Config.Sensitive, - Value: n.Config.RawConfig, - ContinueOnErr: true, - }, - }, &EvalOpFilter{ Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy}, Node: &EvalWriteOutput{ - Name: n.Config.Name, + Addr: n.Addr.OutputValue, Sensitive: n.Config.Sensitive, - Value: n.Config.RawConfig, + Expr: n.Config.Expr, }, }, }, } } +// dag.GraphNodeDotter impl. +func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} + // NodeDestroyableOutput represents an output that is "destroyable": // its application will remove the output from the state.
type NodeDestroyableOutput struct { - PathValue []string - Config *config.Output // Config is the output in the config + Addr addrs.AbsOutputValue + Config *configs.Output // Config is the output in the config } -func (n *NodeDestroyableOutput) Name() string { - result := fmt.Sprintf("output.%s (destroy)", n.Config.Name) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } +var ( + _ GraphNodeSubPath = (*NodeDestroyableOutput)(nil) + _ RemovableIfNotTargeted = (*NodeDestroyableOutput)(nil) + _ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil) + _ GraphNodeReferencer = (*NodeDestroyableOutput)(nil) + _ GraphNodeEvalable = (*NodeDestroyableOutput)(nil) + _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil) +) - return result +func (n *NodeDestroyableOutput) Name() string { + return fmt.Sprintf("%s (destroy)", n.Addr.String()) } // GraphNodeSubPath -func (n *NodeDestroyableOutput) Path() []string { - return n.PathValue +func (n *NodeDestroyableOutput) Path() addrs.ModuleInstance { + return n.Addr.Module } // RemovableIfNotTargeted @@ -129,25 +177,24 @@ func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *d } // GraphNodeReferencer -func (n *NodeDestroyableOutput) References() []string { - var result []string - result = append(result, n.Config.DependsOn...) - result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) - for _, v := range result { - split := strings.Split(v, "/") - for i, s := range split { - split[i] = s + ".destroy" - } - - result = append(result, strings.Join(split, "/")) - } - - return result +func (n *NodeDestroyableOutput) References() []*addrs.Reference { + return referencesForOutput(n.Config) } // GraphNodeEvalable func (n *NodeDestroyableOutput) EvalTree() EvalNode { return &EvalDeleteOutput{ - Name: n.Config.Name, + Addr: n.Addr.OutputValue, + } +} + +// dag.GraphNodeDotter impl. +func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go index 0fd1554a95..518b8aa09f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go @@ -2,31 +2,39 @@ package terraform import ( "fmt" + + "github.com/hashicorp/terraform/addrs" ) // NodeOutputOrphan represents an output that is an orphan. type NodeOutputOrphan struct { - OutputName string - PathValue []string + Addr addrs.AbsOutputValue } +var ( + _ GraphNodeSubPath = (*NodeOutputOrphan)(nil) + _ GraphNodeReferenceable = (*NodeOutputOrphan)(nil) + _ GraphNodeReferenceOutside = (*NodeOutputOrphan)(nil) + _ GraphNodeEvalable = (*NodeOutputOrphan)(nil) +) + func (n *NodeOutputOrphan) Name() string { - result := fmt.Sprintf("output.%s (orphan)", n.OutputName) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } + return fmt.Sprintf("%s (orphan)", n.Addr.String()) +} - return result +// GraphNodeReferenceOutside implementation +func (n *NodeOutputOrphan) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + return referenceOutsideForOutput(n.Addr) } // GraphNodeReferenceable -func (n *NodeOutputOrphan) ReferenceableName() []string { - return []string{"output." 
+ n.OutputName} +func (n *NodeOutputOrphan) ReferenceableAddrs() []addrs.Referenceable { + return referenceableAddrsForOutput(n.Addr) } // GraphNodeSubPath -func (n *NodeOutputOrphan) Path() []string { - return n.PathValue +func (n *NodeOutputOrphan) Path() addrs.ModuleInstance { + return n.Addr.Module } // GraphNodeEvalable @@ -34,7 +42,7 @@ func (n *NodeOutputOrphan) EvalTree() EvalNode { return &EvalOpFilter{ Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, Node: &EvalDeleteOutput{ - Name: n.OutputName, + Addr: n.Addr.OutputValue, }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go index 9e490f7b4f..a0cdcfe01d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go @@ -1,10 +1,10 @@ package terraform import ( - "fmt" - "strings" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/dag" ) @@ -15,37 +15,33 @@ type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex // NodeAbstractProvider represents a provider that has no associated operations. // It registers all the common interfaces across operations for providers. type NodeAbstractProvider struct { - NameValue string - PathValue []string + Addr addrs.AbsProviderConfig // The fields below will be automatically set using the Attach // interfaces if you're running those transforms, but also be explicitly // set if you already have that information. - Config *config.ProviderConfig + Config *configs.Provider + Schema *configschema.Block } -func ResolveProviderName(name string, path []string) string { - if strings.Contains(name, "provider.") { - // already resolved - return name - } - - name = fmt.Sprintf("provider.%s", name) - if len(path) >= 1 { - name = fmt.Sprintf("%s.%s", modulePrefixStr(path), name) - } - - return name -} +var ( + _ GraphNodeSubPath = (*NodeAbstractProvider)(nil) + _ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil) + _ GraphNodeReferencer = (*NodeAbstractProvider)(nil) + _ GraphNodeProvider = (*NodeAbstractProvider)(nil) + _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil) + _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil) +) func (n *NodeAbstractProvider) Name() string { - return ResolveProviderName(n.NameValue, n.PathValue) + return n.Addr.String() } // GraphNodeSubPath -func (n *NodeAbstractProvider) Path() []string { - return n.PathValue +func (n *NodeAbstractProvider) Path() addrs.ModuleInstance { + return n.Addr.Module } // RemovableIfNotTargeted @@ -56,21 +52,21 @@ func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { } // GraphNodeReferencer -func (n *NodeAbstractProvider) References() []string { - if n.Config == nil { +func (n *NodeAbstractProvider) References() []*addrs.Reference { + if n.Config == nil || n.Schema == nil { return nil } - return ReferencesFromConfig(n.Config.RawConfig) + return ReferencesFromConfig(n.Config.Config, n.Schema) } // GraphNodeProvider -func (n *NodeAbstractProvider) ProviderName() string { - return n.NameValue +func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig { + return n.Addr } // GraphNodeProvider -func (n *NodeAbstractProvider) ProviderConfig() 
*config.ProviderConfig { +func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider { if n.Config == nil { return nil } @@ -79,10 +75,15 @@ func (n *NodeAbstractProvider) ProviderConfig() *config.ProviderConfig { } // GraphNodeAttachProvider -func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) { +func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) { n.Config = c } +// GraphNodeAttachProviderConfigSchema impl. +func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) { + n.Schema = schema +} + // GraphNodeDotter impl. func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { return &dag.DotNode{ diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go index a00bc46fb4..30d8813a45 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go @@ -2,6 +2,8 @@ package terraform import ( "fmt" + + "github.com/hashicorp/terraform/dag" ) // NodeDisabledProvider represents a provider that is disabled. A disabled @@ -11,24 +13,15 @@ type NodeDisabledProvider struct { *NodeAbstractProvider } +var ( + _ GraphNodeSubPath = (*NodeDisabledProvider)(nil) + _ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil) + _ GraphNodeReferencer = (*NodeDisabledProvider)(nil) + _ GraphNodeProvider = (*NodeDisabledProvider)(nil) + _ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil) + _ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil) +) + func (n *NodeDisabledProvider) Name() string { return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) } - -// GraphNodeEvalable -func (n *NodeDisabledProvider) EvalTree() EvalNode { - var resourceConfig *ResourceConfig - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalInterpolateProvider{ - Config: n.ProviderConfig(), - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n.ProviderName(), - Config: &resourceConfig, - Output: &resourceConfig, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go new file mode 100644 index 0000000000..580e60cb7e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go @@ -0,0 +1,20 @@ +package terraform + +// NodeEvalableProvider represents a provider during an "eval" walk. +// This special provider node type just initializes a provider and +// fetches its schema, without configuring it or otherwise interacting +// with it. +type NodeEvalableProvider struct { + *NodeAbstractProvider +} + +// GraphNodeEvalable +func (n *NodeEvalableProvider) EvalTree() EvalNode { + addr := n.Addr + relAddr := addr.ProviderConfig + + return &EvalInitProvider{ + TypeName: relAddr.Type, + Addr: addr.ProviderConfig, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go index bb117c1d6f..31ed1a8c83 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go @@ -3,6 +3,7 @@ package terraform import ( "fmt" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/config" ) @@ -10,7 +11,7 @@ import ( // It registers all the common interfaces across operations for providers. 
type NodeProvisioner struct { NameValue string - PathValue []string + PathValue addrs.ModuleInstance // The fields below will be automatically set using the Attach // interfaces if you're running those transforms, but also be explicitly @@ -19,17 +20,23 @@ type NodeProvisioner struct { Config *config.ProviderConfig } +var ( + _ GraphNodeSubPath = (*NodeProvisioner)(nil) + _ GraphNodeProvisioner = (*NodeProvisioner)(nil) + _ GraphNodeEvalable = (*NodeProvisioner)(nil) +) + func (n *NodeProvisioner) Name() string { result := fmt.Sprintf("provisioner.%s", n.NameValue) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + if len(n.PathValue) > 0 { + result = fmt.Sprintf("%s.%s", n.PathValue.String(), result) } return result } // GraphNodeSubPath -func (n *NodeProvisioner) Path() []string { +func (n *NodeProvisioner) Path() addrs.ModuleInstance { return n.PathValue } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go index 73509c87f1..3a0570c5b6 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go @@ -2,10 +2,16 @@ package terraform import ( "fmt" - "strings" + "log" + "sort" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // ConcreteResourceNodeFunc is a callback type used to convert an @@ -16,225 +22,420 @@ type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex // The type of operation cannot be assumed, only that this node represents // the given resource. type GraphNodeResource interface { - ResourceAddr() *ResourceAddress + ResourceAddr() addrs.AbsResource +} + +// ConcreteResourceInstanceNodeFunc is a callback type used to convert an +// abstract resource instance to a concrete one of some type. +type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex + +// GraphNodeResourceInstance is implemented by any nodes that represent +// a resource instance. A single resource may have multiple instances if, +// for example, the "count" or "for_each" argument is used for it in +// configuration. +type GraphNodeResourceInstance interface { + ResourceInstanceAddr() addrs.AbsResourceInstance } // NodeAbstractResource represents a resource that has no associated // operations. It registers all the interfaces for a resource that common // across multiple operation types. type NodeAbstractResource struct { - Addr *ResourceAddress // Addr is the address for this resource + Addr addrs.AbsResource // Addr is the address for this resource // The fields below will be automatically set using the Attach // interfaces if you're running those transforms, but also be explicitly // set if you already have that information. 
- Config *config.Resource // Config is the resource in the config - ResourceState *ResourceState // ResourceState is the ResourceState for this + Schema *configschema.Block // Schema for processing the configuration body + SchemaVersion uint64 // Schema version of "Schema", as decided by the provider + Config *configs.Resource // Config is the resource in the config - Targets []ResourceAddress // Set from GraphNodeTargetable + ProvisionerSchemas map[string]*configschema.Block + + Targets []addrs.Targetable // Set from GraphNodeTargetable // The address of the provider this resource will use - ResolvedProvider string + ResolvedProvider addrs.AbsProviderConfig +} + +var ( + _ GraphNodeSubPath = (*NodeAbstractResource)(nil) + _ GraphNodeReferenceable = (*NodeAbstractResource)(nil) + _ GraphNodeReferencer = (*NodeAbstractResource)(nil) + _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil) + _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil) + _ GraphNodeResource = (*NodeAbstractResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil) + _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil) + _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil) + _ GraphNodeTargetable = (*NodeAbstractResource)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil) +) + +// NewNodeAbstractResource creates an abstract resource graph node for +// the given absolute resource address. +func NewNodeAbstractResource(addr addrs.AbsResource) *NodeAbstractResource { + return &NodeAbstractResource{ + Addr: addr, + } +} + +// NodeAbstractResourceInstance represents a resource instance with no +// associated operations. It embeds NodeAbstractResource but additionally +// contains an instance key, used to identify one of potentially many +// instances that were created from a resource in configuration, e.g. using +// the "count" or "for_each" arguments. +type NodeAbstractResourceInstance struct { + NodeAbstractResource + InstanceKey addrs.InstanceKey + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but also be explicitly + // set if you already have that information. + + ResourceState *states.Resource +} + +var ( + _ GraphNodeSubPath = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeResource = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil) +) + +// NewNodeAbstractResourceInstance creates an abstract resource instance graph +// node for the given absolute resource instance address. 
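+//
+// As an illustrative sketch (the address is hypothetical and assumed to be
+// obtained elsewhere), the instance key is split out of the address and can
+// be recovered via ResourceInstanceAddr:
+//
+//     // addr is module.child.aws_instance.web[0]
+//     node := NewNodeAbstractResourceInstance(addr)
+//     node.Addr                   // module.child.aws_instance.web
+//     node.InstanceKey            // addrs.IntKey(0)
+//     node.ResourceInstanceAddr() // module.child.aws_instance.web[0] again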
+func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance {
+	// Due to the fact that we embed NodeAbstractResource, the given address
+	// actually ends up split between the resource address in the embedded
+	// object and the InstanceKey field in our own struct. The
+	// ResourceInstanceAddr method will stick these back together again on
+	// request.
+	return &NodeAbstractResourceInstance{
+		NodeAbstractResource: NodeAbstractResource{
+			Addr: addr.ContainingResource(),
+		},
+		InstanceKey: addr.Resource.Key,
+	}
 }
 
 func (n *NodeAbstractResource) Name() string {
-	return n.Addr.String()
+	return n.ResourceAddr().String()
+}
+
+func (n *NodeAbstractResourceInstance) Name() string {
+	return n.ResourceInstanceAddr().String()
 }
 
 // GraphNodeSubPath
-func (n *NodeAbstractResource) Path() []string {
-	return n.Addr.Path
+func (n *NodeAbstractResource) Path() addrs.ModuleInstance {
+	return n.Addr.Module
 }
 
 // GraphNodeReferenceable
-func (n *NodeAbstractResource) ReferenceableName() []string {
-	// We always are referenceable as "type.name" as long as
-	// we have a config or address. Determine what that value is.
-	var id string
-	if n.Config != nil {
-		id = n.Config.Id()
-	} else if n.Addr != nil {
-		addrCopy := n.Addr.Copy()
-		addrCopy.Path = nil // ReferenceTransformer handles paths
-		addrCopy.Index = -1 // We handle indexes below
-		id = addrCopy.String()
-	} else {
-		// No way to determine our type.name, just return
-		return nil
-	}
+func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable {
+	return []addrs.Referenceable{n.Addr.Resource}
+}
 
-	var result []string
+// GraphNodeReferenceable
+func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable {
+	addr := n.ResourceInstanceAddr()
+	return []addrs.Referenceable{
+		addr.Resource,
+
+		// A resource instance can also be referenced by the address of its
+		// containing resource, so that e.g. a reference to aws_instance.foo
+		// would match both aws_instance.foo[0] and aws_instance.foo[1].
+		addr.ContainingResource().Resource,
+	}
+}
 
-	// Always include our own ID. This is primarily for backwards
-	// compatibility with states that didn't yet support the more
-	// specific dep string.
-	result = append(result, id)
+// GraphNodeReferencer
+func (n *NodeAbstractResource) References() []*addrs.Reference {
+	// If we have a config then we prefer to use that.
+	if c := n.Config; c != nil {
+		var result []*addrs.Reference
+
+		for _, traversal := range c.DependsOn {
+			ref, err := addrs.ParseRef(traversal)
+			if err != nil {
+				// We ignore this here, because this isn't a suitable place to return
+				// errors. This situation should be caught and rejected during
+				// validation.
+				log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, err)
+				continue
+			}
 
-	// We represent all multi-access
-	result = append(result, fmt.Sprintf("%s.*", id))
+			result = append(result, ref)
+		}
 
-	// We represent either a specific number, or all numbers
-	suffix := "N"
-	if n.Addr != nil {
-		idx := n.Addr.Index
-		if idx == -1 {
-			idx = 0
+		if n.Schema == nil {
+			// Should never happen, but we'll log if it does so that we can
+			// see this easily when debugging.
+			log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name())
 		}
-		suffix = fmt.Sprintf("%d", idx)
+		refs, _ := lang.ReferencesInExpr(c.Count)
+		result = append(result, refs...)
+		refs, _ = lang.ReferencesInBlock(c.Config, n.Schema)
+		result = append(result, refs...)
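+		// For example, a "count" expression like
+		//     count = length(var.subnet_ids)
+		// contributes a reference to var.subnet_ids via the
+		// lang.ReferencesInExpr call above. (The variable name here is
+		// hypothetical.)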
+ if c.Managed != nil { + for _, p := range c.Managed.Provisioners { + if p.When != configs.ProvisionerWhenCreate { + continue + } + if p.Connection != nil { + refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema) + result = append(result, refs...) + } + + schema := n.ProvisionerSchemas[p.Type] + if schema == nil { + log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name()) + } + refs, _ = lang.ReferencesInBlock(p.Config, schema) + result = append(result, refs...) + } + } + return result } - result = append(result, fmt.Sprintf("%s.%s", id, suffix)) - return result + // Otherwise, we have no references. + return nil } // GraphNodeReferencer -func (n *NodeAbstractResource) References() []string { - // If we have a config, that is our source of truth - if c := n.Config; c != nil { - // Grab all the references - var result []string - result = append(result, c.DependsOn...) - result = append(result, ReferencesFromConfig(c.RawCount)...) - result = append(result, ReferencesFromConfig(c.RawConfig)...) - for _, p := range c.Provisioners { - if p.When == config.ProvisionerWhenCreate { - result = append(result, ReferencesFromConfig(p.ConnInfo)...) - result = append(result, ReferencesFromConfig(p.RawConfig)...) - } +func (n *NodeAbstractResourceInstance) References() []*addrs.Reference { + // If we have a configuration attached then we'll delegate to our + // embedded abstract resource, which knows how to extract dependencies + // from configuration. + if n.Config != nil { + if n.Schema == nil { + // We'll produce a log message about this out here so that + // we can include the full instance address, since the equivalent + // message in NodeAbstractResource.References cannot see it. + log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) + return nil } - - return uniqueStrings(result) + return n.NodeAbstractResource.References() } - // If we have state, that is our next source - if s := n.ResourceState; s != nil { - return s.Dependencies + // Otherwise, if we have state then we'll use the values stored in state + // as a fallback. + if rs := n.ResourceState; rs != nil { + if s := rs.Instance(n.InstanceKey); s != nil { + // State is still storing dependencies as old-style strings, so we'll + // need to do a little work here to massage this to the form we now + // want. + var result []*addrs.Reference + for _, addr := range s.Current.Dependencies { + if addr == nil { + // Should never happen; indicates a bug in the state loader + panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr())) + } + + // This is a little weird: we need to manufacture an addrs.Reference + // with a fake range here because the state isn't something we can + // make source references into. + result = append(result, &addrs.Reference{ + Subject: addr, + SourceRange: tfdiags.SourceRange{ + Filename: "(state file)", + }, + }) + } + return result + } } + // If we have neither config nor state then we have no references. return nil } +// converts an instance address to the legacy dotted notation +func dottedInstanceAddr(tr addrs.ResourceInstance) string { + // The legacy state format uses dot-separated instance keys, + // rather than bracketed as in our modern syntax. 
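+	// For example (addresses hypothetical):
+	//
+	//     aws_instance.foo[0]     becomes aws_instance.foo.0
+	//     aws_instance.foo["web"] becomes aws_instance.foo.web
+	//
+	// An instance with no key is left as just aws_instance.foo.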
+ var suffix string + switch tk := tr.Key.(type) { + case addrs.IntKey: + suffix = fmt.Sprintf(".%d", int(tk)) + case addrs.StringKey: + suffix = fmt.Sprintf(".%s", string(tk)) + } + return tr.Resource.String() + suffix +} + // StateReferences returns the dependencies to put into the state for // this resource. -func (n *NodeAbstractResource) StateReferences() []string { - self := n.ReferenceableName() - - // Determine what our "prefix" is for checking for references to - // ourself. - addrCopy := n.Addr.Copy() - addrCopy.Index = -1 - selfPrefix := addrCopy.String() + "." +func (n *NodeAbstractResourceInstance) StateReferences() []addrs.Referenceable { + selfAddrs := n.ReferenceableAddrs() + + // Since we don't include the source location references in our + // results from this method, we'll also filter out duplicates: + // there's no point in listing the same object twice without + // that additional context. + seen := map[string]struct{}{} + + // Pretend that we've already "seen" all of our own addresses so that we + // won't record self-references in the state. This can arise if, for + // example, a provisioner for a resource refers to the resource itself, + // which is valid (since provisioners always run after apply) but should + // not create an explicit dependency edge. + for _, selfAddr := range selfAddrs { + seen[selfAddr.String()] = struct{}{} + if riAddr, ok := selfAddr.(addrs.ResourceInstance); ok { + seen[riAddr.ContainingResource().String()] = struct{}{} + } + } depsRaw := n.References() - deps := make([]string, 0, len(depsRaw)) + deps := make([]addrs.Referenceable, 0, len(depsRaw)) for _, d := range depsRaw { - // Ignore any variable dependencies - if strings.HasPrefix(d, "var.") { - continue + subj := d.Subject + if mco, isOutput := subj.(addrs.ModuleCallOutput); isOutput { + // For state dependencies, we simplify outputs to just refer + // to the module as a whole. It's not really clear why we do this, + // but this logic is preserved from before the 0.12 rewrite of + // this function. + subj = mco.Call } - // If this has a backup ref, ignore those for now. The old state - // file never contained those and I'd rather store the rich types we - // add in the future. - if idx := strings.IndexRune(d, '/'); idx != -1 { - d = d[:idx] - } - - // If we're referencing ourself, then ignore it - found := false - for _, s := range self { - if d == s { - found = true - } - } - if found { + k := subj.String() + if _, exists := seen[k]; exists { continue } - - // If this is a reference to ourself and a specific index, we keep - // it. For example, if this resource is "foo.bar" and the reference - // is "foo.bar.0" then we keep it exact. Otherwise, we strip it. - if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) { - d = d[:len(d)-2] - } - - // This is sad. The dependencies are currently in the format of - // "module.foo.bar" (the full field). This strips the field off. - if strings.HasPrefix(d, "module.") { - parts := strings.SplitN(d, ".", 3) - d = strings.Join(parts[0:2], ".") + seen[k] = struct{}{} + switch tr := subj.(type) { + case addrs.ResourceInstance: + deps = append(deps, tr) + case addrs.Resource: + deps = append(deps, tr) + case addrs.ModuleCallInstance: + deps = append(deps, tr) + default: + // No other reference types are recorded in the state. } - - deps = append(deps, d) } + // We'll also sort them, since that'll avoid creating changes in the + // serialized state that make no semantic difference. 
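+	// (For example, dependencies collected in the order
+	// [aws_instance.b, aws_instance.a] always serialize as
+	// [aws_instance.a, aws_instance.b]; these addresses are hypothetical.)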
+ sort.Slice(deps, func(i, j int) bool { + // Simple string-based sort because we just care about consistency, + // not user-friendliness. + return deps[i].String() < deps[j].String() + }) + return deps } -func (n *NodeAbstractResource) SetProvider(p string) { +func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) { n.ResolvedProvider = p } // GraphNodeProviderConsumer -func (n *NodeAbstractResource) ProvidedBy() string { +func (n *NodeAbstractResource) ProvidedBy() (addrs.AbsProviderConfig, bool) { // If we have a config we prefer that above all else if n.Config != nil { - return resourceProvider(n.Config.Type, n.Config.Provider) + relAddr := n.Config.ProviderConfigAddr() + return relAddr.Absolute(n.Path()), false + } + + // Use our type and containing module path to guess a provider configuration address + return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Addr.Module), false +} + +// GraphNodeProviderConsumer +func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.AbsProviderConfig, bool) { + // If we have a config we prefer that above all else + if n.Config != nil { + relAddr := n.Config.ProviderConfigAddr() + return relAddr.Absolute(n.Path()), false } // If we have state, then we will use the provider from there - if n.ResourceState != nil && n.ResourceState.Provider != "" { - return n.ResourceState.Provider + if n.ResourceState != nil { + // An address from the state must match exactly, since we must ensure + // we refresh/destroy a resource with the same provider configuration + // that created it. + return n.ResourceState.ProviderConfig, true } - // Use our type - return resourceProvider(n.Addr.Type, "") + // Use our type and containing module path to guess a provider configuration address + return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Path()), false } // GraphNodeProvisionerConsumer func (n *NodeAbstractResource) ProvisionedBy() []string { // If we have no configuration, then we have no provisioners - if n.Config == nil { + if n.Config == nil || n.Config.Managed == nil { return nil } // Build the list of provisioners we need based on the configuration. // It is okay to have duplicates here. 
- result := make([]string, len(n.Config.Provisioners)) - for i, p := range n.Config.Provisioners { + result := make([]string, len(n.Config.Managed.Provisioners)) + for i, p := range n.Config.Managed.Provisioners { result[i] = p.Type } return result } -// GraphNodeResource, GraphNodeAttachResourceState -func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress { +// GraphNodeProvisionerConsumer +func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) { + if n.ProvisionerSchemas == nil { + n.ProvisionerSchemas = make(map[string]*configschema.Block) + } + n.ProvisionerSchemas[name] = schema +} + +// GraphNodeResource +func (n *NodeAbstractResource) ResourceAddr() addrs.AbsResource { return n.Addr } +// GraphNodeResourceInstance +func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance { + return n.NodeAbstractResource.Addr.Instance(n.InstanceKey) +} + // GraphNodeAddressable, TODO: remove, used by target, should unify func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress { - return n.ResourceAddr() + return NewLegacyResourceAddress(n.Addr) } // GraphNodeTargetable -func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) { +func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) { n.Targets = targets } // GraphNodeAttachResourceState -func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) { +func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) { n.ResourceState = s } // GraphNodeAttachResourceConfig -func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) { +func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) { n.Config = c } +// GraphNodeAttachResourceSchema impl +func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) { + n.Schema = schema + n.SchemaVersion = version +} + // GraphNodeDotter impl. func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { return &dag.DotNode{ diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go deleted file mode 100644 index 573570d8e2..0000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go +++ /dev/null @@ -1,50 +0,0 @@ -package terraform - -// NodeAbstractCountResource should be embedded instead of NodeAbstractResource -// if the resource has a `count` value that needs to be expanded. -// -// The embedder should implement `DynamicExpand` to process the count. -type NodeAbstractCountResource struct { - *NodeAbstractResource - - // Validate, if true, will perform the validation for the count. - // This should only be turned on for the "validate" operation. - Validate bool -} - -// GraphNodeEvalable -func (n *NodeAbstractCountResource) EvalTree() EvalNode { - // We only check if the count is computed if we're not validating. - // If we're validating we allow computed counts since they just turn - // into more computed values. - var evalCountCheckComputed EvalNode - if !n.Validate { - evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config} - } - - return &EvalSequence{ - Nodes: []EvalNode{ - // The EvalTree for a plannable resource primarily involves - // interpolating the count since it can contain variables - // we only just received access to. 
-			// -			// With the interpolated count, we can then DynamicExpand
-			// into the proper number of instances.
-			&EvalInterpolate{Config: n.Config.RawCount},
-
-			// Check if the count is computed
-			evalCountCheckComputed,
-
-			// If validation is enabled, perform the validation
-			&EvalIf{
-				If: func(ctx EvalContext) (bool, error) {
-					return n.Validate, nil
-				},
-
-				Then: &EvalValidateCount{Resource: n.Config},
-			},
-
-			&EvalCountFixZeroOneBoundary{Resource: n.Config},
-		},
-	}
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
index 40ee1cf2a7..3e2fff3a05 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -1,400 +1,71 @@
 package terraform
 
 import (
-	"fmt"
+	"log"
 
-	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/dag"
+	"github.com/hashicorp/terraform/lang"
 )
 
 // NodeApplyableResource represents a resource that is "applyable":
-// it is ready to be applied and is represented by a diff.
+// it may need to have its record in the state adjusted to match configuration.
+//
+// Unlike in the plan walk, this resource node does not DynamicExpand. Instead,
+// it should be inserted into the same graph as the nodes for any of its
+// instances, with dependency edges ensuring that the resource is evaluated
+// before any of its instances, which will in turn ensure that the
+// whole-resource record in the state is suitably prepared to receive any
+// updates to instances.
 type NodeApplyableResource struct {
 	*NodeAbstractResource
 }
 
-// GraphNodeCreator
-func (n *NodeApplyableResource) CreateAddr() *ResourceAddress {
-	return n.NodeAbstractResource.Addr
-}
-
-// GraphNodeReferencer, overriding NodeAbstractResource
-func (n *NodeApplyableResource) References() []string {
-	result := n.NodeAbstractResource.References()
-
-	// The "apply" side of a resource generally also depends on the
-	// destruction of its dependencies as well. For example, if a LB
-	// references a set of VMs with ${vm.foo.*.id}, then we must wait for
-	// the destruction so we get the newly updated list of VMs.
-	//
-	// The exception here is CBD. When CBD is set, we don't do this since
-	// it would create a cycle. By not creating a cycle, we require two
-	// applies since the first apply the creation step will use the OLD
-	// values (pre-destroy) and the second step will update.
-	//
-	// This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
-	// We mimic that behavior here now and can improve upon it in the future.
-	//
-	// This behavior is tested in graph_build_apply_test.go to test ordering.
-	cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
-	if !cbd {
-		// The "apply" side of a resource always depends on the destruction
-		// of all its dependencies in addition to the creation.
- for _, v := range result { - result = append(result, v+".destroy") - } - } +var ( + _ GraphNodeResource = (*NodeApplyableResource)(nil) + _ GraphNodeEvalable = (*NodeApplyableResource)(nil) + _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil) + _ GraphNodeReferencer = (*NodeApplyableResource)(nil) +) - return result +func (n *NodeApplyableResource) Name() string { + return n.NodeAbstractResource.Name() + " (prepare state)" } -// GraphNodeEvalable -func (n *NodeApplyableResource) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, +func (n *NodeApplyableResource) References() []*addrs.Reference { + if n.Config == nil { + log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n)) + return nil } - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } + var result []*addrs.Reference - // Determine the dependencies for the state. - stateDeps := n.StateReferences() + // Since this node type only updates resource-level metadata, we only + // need to worry about the parts of the configuration that affect + // our "each mode": the count and for_each meta-arguments. + refs, _ := lang.ReferencesInExpr(n.Config.Count) + result = append(result, refs...) + refs, _ = lang.ReferencesInExpr(n.Config.ForEach) + result = append(result, refs...) - // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case config.ManagedResourceMode: - return n.evalTreeManagedResource( - stateId, info, resource, stateDeps, - ) - case config.DataResourceMode: - return n.evalTreeDataResource( - stateId, info, resource, stateDeps) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } + return result } -func (n *NodeApplyableResource) evalTreeDataResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - var provider ResourceProvider - var config *ResourceConfig - var diff *InstanceDiff - var state *InstanceState - - return &EvalSequence{ - Nodes: []EvalNode{ - // Build the instance info - &EvalInstanceInfo{ - Info: info, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Name: stateId, - Diff: &diff, - }, - - // Stop here if we don't actually have a diff - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diff == nil { - return true, EvalEarlyExitError{} - } - - if diff.GetAttributesLen() == 0 { - return true, EvalEarlyExitError{} - } - - return true, nil - }, - Then: EvalNoop{}, - }, - - // Normally we interpolate count as a preparation step before - // a DynamicExpand, but an apply graph has pre-expanded nodes - // and so the count would otherwise never be interpolated. - // - // This is redundant when there are multiple instances created - // from the same config (count > 1) but harmless since the - // underlying structures have mutexes to make this concurrency-safe. - // - // In most cases this isn't actually needed because we dealt with - // all of the counts during the plan walk, but we do it here - // for completeness because other code assumes that the - // final count is always available during interpolation. 
- // - // Here we are just populating the interpolated value in-place - // inside this RawConfig object, like we would in - // NodeAbstractCountResource. - &EvalInterpolate{ - Config: n.Config.RawCount, - ContinueOnErr: true, - }, - - // We need to re-interpolate the config here, rather than - // just using the diff's values directly, because we've - // potentially learned more variable values during the - // apply pass that weren't known when the diff was produced. - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - - &EvalGetProvider{ - Name: n.ResolvedProvider, - Output: &provider, - }, - - // Make a new diff with our newly-interpolated config. - &EvalReadDataDiff{ - Info: info, - Config: &config, - Previous: &diff, - Provider: &provider, - Output: &diff, - }, - - &EvalReadDataApply{ - Info: info, - Diff: &diff, - Provider: &provider, - Output: &state, - }, - - &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.ResolvedProvider, - Dependencies: stateDeps, - State: &state, - }, - - // Clear the diff now that we've applied it, so - // later nodes won't see a diff that's now a no-op. - &EvalWriteDiff{ - Name: stateId, - Diff: nil, - }, - - &EvalUpdateStateHook{}, - }, +// GraphNodeEvalable +func (n *NodeApplyableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config + providerAddr := n.ResolvedProvider + + if config == nil { + // Nothing to do, then. + log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) + return &EvalNoop{} } -} - -func (n *NodeApplyableResource) evalTreeManagedResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider ResourceProvider - var diff, diffApply *InstanceDiff - var state *InstanceState - var resourceConfig *ResourceConfig - var err error - var createNew bool - var createBeforeDestroyEnabled bool - - return &EvalSequence{ - Nodes: []EvalNode{ - // Build the instance info - &EvalInstanceInfo{ - Info: info, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Name: stateId, - Diff: &diffApply, - }, - - // We don't want to do any destroys - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil { - return true, EvalEarlyExitError{} - } - - if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 { - return true, EvalEarlyExitError{} - } - - diffApply.SetDestroy(false) - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - destroy := false - if diffApply != nil { - destroy = diffApply.GetDestroy() || diffApply.RequiresNew() - } - - createBeforeDestroyEnabled = - n.Config.Lifecycle.CreateBeforeDestroy && - destroy - - return createBeforeDestroyEnabled, nil - }, - Then: &EvalDeposeState{ - Name: stateId, - }, - }, - - // Normally we interpolate count as a preparation step before - // a DynamicExpand, but an apply graph has pre-expanded nodes - // and so the count would otherwise never be interpolated. - // - // This is redundant when there are multiple instances created - // from the same config (count > 1) but harmless since the - // underlying structures have mutexes to make this concurrency-safe. 
- // - // In most cases this isn't actually needed because we dealt with - // all of the counts during the plan walk, but we need to do this - // in order to support interpolation of resource counts from - // apply-time-interpolated expressions, such as those in - // "provisioner" blocks. - // - // Here we are just populating the interpolated value in-place - // inside this RawConfig object, like we would in - // NodeAbstractCountResource. - &EvalInterpolate{ - Config: n.Config.RawCount, - ContinueOnErr: true, - }, - - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }, - &EvalGetProvider{ - Name: n.ResolvedProvider, - Output: &provider, - }, - &EvalReadState{ - Name: stateId, - Output: &state, - }, - // Re-run validation to catch any errors we missed, e.g. type - // mismatches on computed values. - &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, - IgnoreWarnings: true, - }, - &EvalDiff{ - Info: info, - Config: &resourceConfig, - Resource: n.Config, - Provider: &provider, - Diff: &diffApply, - State: &state, - OutputDiff: &diffApply, - }, - - // Get the saved diff - &EvalReadDiff{ - Name: stateId, - Diff: &diff, - }, - - // Compare the diffs - &EvalCompareDiff{ - Info: info, - One: &diff, - Two: &diffApply, - }, - - &EvalGetProvider{ - Name: n.ResolvedProvider, - Output: &provider, - }, - &EvalReadState{ - Name: stateId, - Output: &state, - }, - // Call pre-apply hook - &EvalApplyPre{ - Info: info, - State: &state, - Diff: &diffApply, - }, - &EvalApply{ - Info: info, - State: &state, - Diff: &diffApply, - Provider: &provider, - Output: &state, - Error: &err, - CreateNew: &createNew, - }, - &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.ResolvedProvider, - Dependencies: stateDeps, - State: &state, - }, - &EvalApplyProvisioners{ - Info: info, - State: &state, - Resource: n.Config, - InterpResource: resource, - CreateNew: &createNew, - Error: &err, - When: config.ProvisionerWhenCreate, - }, - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return createBeforeDestroyEnabled && err != nil, nil - }, - Then: &EvalUndeposeState{ - Name: stateId, - State: &state, - }, - Else: &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.ResolvedProvider, - Dependencies: stateDeps, - State: &state, - }, - }, - - // We clear the diff out here so that future nodes - // don't see a diff that is already complete. There - // is no longer a diff! 
-			&EvalWriteDiff{
-				Name: stateId,
-				Diff: nil,
-			},
-			&EvalApplyPost{
-				Info:  info,
-				State: &state,
-				Error: &err,
-			},
-			&EvalUpdateStateHook{},
-		},
+	return &EvalWriteResourceState{
+		Addr:         addr.Resource,
+		Config:       config,
+		ProviderAddr: providerAddr,
 	}
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
new file mode 100644
index 0000000000..4209605e29
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
@@ -0,0 +1,411 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/plans"
+	"github.com/hashicorp/terraform/providers"
+	"github.com/hashicorp/terraform/states"
+)
+
+// NodeApplyableResourceInstance represents a resource instance that is
+// "applyable": it is ready to be applied and is represented by a diff.
+//
+// This node is for a specific instance of a resource. It will usually be
+// accompanied in the graph by a NodeApplyableResource representing its
+// containing resource, and should depend on that node to ensure that the
+// state is properly prepared to receive changes to instances.
+type NodeApplyableResourceInstance struct {
+	*NodeAbstractResourceInstance
+
+	destroyNode      GraphNodeDestroyerCBD
+	graphNodeDeposer // implementation of GraphNodeDeposer
+}
+
+var (
+	_ GraphNodeResource         = (*NodeApplyableResourceInstance)(nil)
+	_ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil)
+	_ GraphNodeCreator          = (*NodeApplyableResourceInstance)(nil)
+	_ GraphNodeReferencer       = (*NodeApplyableResourceInstance)(nil)
+	_ GraphNodeDeposer          = (*NodeApplyableResourceInstance)(nil)
+	_ GraphNodeEvalable         = (*NodeApplyableResourceInstance)(nil)
+)
+
+// GraphNodeAttachDestroyer
+func (n *NodeApplyableResourceInstance) AttachDestroyNode(d GraphNodeDestroyerCBD) {
+	n.destroyNode = d
+}
+
+// createBeforeDestroy checks this node's config status and the status of any
+// companion destroy node for CreateBeforeDestroy.
+func (n *NodeApplyableResourceInstance) createBeforeDestroy() bool {
+	cbd := false
+
+	if n.Config != nil && n.Config.Managed != nil {
+		cbd = n.Config.Managed.CreateBeforeDestroy
+	}
+
+	if n.destroyNode != nil {
+		cbd = cbd || n.destroyNode.CreateBeforeDestroy()
+	}
+
+	return cbd
+}
+
+// GraphNodeCreator
+func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance {
+	addr := n.ResourceInstanceAddr()
+	return &addr
+}
+
+// GraphNodeReferencer, overriding NodeAbstractResourceInstance
+func (n *NodeApplyableResourceInstance) References() []*addrs.Reference {
+	// Start with the usual resource instance implementation
+	ret := n.NodeAbstractResourceInstance.References()
+
+	// Applying a resource must also depend on the destruction of any of its
+	// dependencies, since this may for example affect the outcome of
+	// evaluating an entire list of resources with "count" set (by reducing
+	// the count).
+	//
+	// However, we can't do this in create_before_destroy mode because that
+	// would create a dependency cycle. We make a compromise here of requiring
+	// changes to be updated across two applies in this case, since the first
+	// plan will use the old values.
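+	// As a hypothetical example: if this instance's configuration refers to
+	// aws_instance.foo, the loop below also records a synthetic reference to
+	// the "destroy" phase of aws_instance.foo, so this apply waits for any
+	// replaced dependency objects to be destroyed first.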
+	if !n.createBeforeDestroy() {
+		for _, ref := range ret {
+			switch tr := ref.Subject.(type) {
+			case addrs.ResourceInstance:
+				newRef := *ref // shallow copy so we can mutate
+				newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
+				newRef.Remaining = nil // can't access attributes of something being destroyed
+				ret = append(ret, &newRef)
+			case addrs.Resource:
+				newRef := *ref // shallow copy so we can mutate
+				newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
+				newRef.Remaining = nil // can't access attributes of something being destroyed
+				ret = append(ret, &newRef)
+			}
+		}
+	}
+
+	return ret
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableResourceInstance) EvalTree() EvalNode {
+	addr := n.ResourceInstanceAddr()
+
+	// State still uses legacy-style internal ids, so we need to shim to get
+	// a suitable key to use.
+	stateId := NewLegacyResourceInstanceAddress(addr).stateId()
+
+	// Determine the dependencies for the state.
+	stateDeps := n.StateReferences()
+
+	// Eval info is different depending on what kind of resource this is
+	switch n.Config.Mode {
+	case addrs.ManagedResourceMode:
+		return n.evalTreeManagedResource(addr, stateId, stateDeps)
+	case addrs.DataResourceMode:
+		return n.evalTreeDataResource(addr, stateId, stateDeps)
+	default:
+		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
+	}
+}
+
+func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
+	var provider providers.Interface
+	var providerSchema *ProviderSchema
+	var change *plans.ResourceInstanceChange
+	var state *states.ResourceInstanceObject
+
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			&EvalGetProvider{
+				Addr:   n.ResolvedProvider,
+				Output: &provider,
+				Schema: &providerSchema,
+			},
+
+			// Get the saved diff for apply
+			&EvalReadDiff{
+				Addr:           addr.Resource,
+				ProviderSchema: &providerSchema,
+				Change:         &change,
+			},
+
+			// Stop early if we don't actually have a diff
+			&EvalIf{
+				If: func(ctx EvalContext) (bool, error) {
+					if change == nil {
+						return true, EvalEarlyExitError{}
+					}
+					return true, nil
+				},
+				Then: EvalNoop{},
+			},
+
+			// In this particular call to EvalReadData we include our planned
+			// change, which signals that we expect this read to complete fully
+			// with no unknown values; it'll produce an error if not.
+			&EvalReadData{
+				Addr:           addr.Resource,
+				Config:         n.Config,
+				Dependencies:   n.StateReferences(),
+				Planned:        &change, // setting this indicates that the result must be complete
+				Provider:       &provider,
+				ProviderAddr:   n.ResolvedProvider,
+				ProviderSchema: &providerSchema,
+				OutputState:    &state,
+			},
+
+			&EvalWriteState{
+				Addr:           addr.Resource,
+				ProviderAddr:   n.ResolvedProvider,
+				ProviderSchema: &providerSchema,
+				State:          &state,
+			},
+
+			// Clear the diff now that we've applied it, so
+			// later nodes won't see a diff that's now a no-op.
+			&EvalWriteDiff{
+				Addr:           addr.Resource,
+				ProviderSchema: &providerSchema,
+				Change:         nil,
+			},
+
+			&EvalUpdateStateHook{},
+		},
+	}
+}
+
+func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
+	// Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by address below.
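+	// (Concretely: each EvalNode in the sequence below receives pointers
+	// such as &provider, &state and &diffApply, writes results through them
+	// as it runs, and later nodes read what earlier nodes wrote.)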
+ var provider providers.Interface + var providerSchema *ProviderSchema + var diff, diffApply *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var err error + var createNew bool + var createBeforeDestroyEnabled bool + var configVal cty.Value + var deposedKey states.DeposedKey + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &diffApply, + }, + + // We don't want to do any destroys + // (these are handled by NodeDestroyResourceInstance instead) + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply == nil { + return true, EvalEarlyExitError{} + } + if diffApply.Action == plans.Delete { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + destroy := false + if diffApply != nil { + destroy = (diffApply.Action == plans.Delete || diffApply.Action.IsReplace()) + } + if destroy && n.createBeforeDestroy() { + createBeforeDestroyEnabled = true + } + return createBeforeDestroyEnabled, nil + }, + Then: &EvalDeposeState{ + Addr: addr.Resource, + ForceKey: n.PreallocatedDeposedKey, + OutputKey: &deposedKey, + }, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + // Get the saved diff + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &diff, + }, + + // Make a new diff, in case we've learned new values in the state + // during apply which we can now incorporate. + &EvalDiff{ + Addr: addr.Resource, + Config: n.Config, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + PreviousDiff: &diff, + OutputChange: &diffApply, + OutputValue: &configVal, + OutputState: &state, + }, + + // Compare the diffs + &EvalCheckPlannedChange{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Planned: &diff, + Actual: &diffApply, + }, + + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalReduceDiff{ + Addr: addr.Resource, + InChange: &diffApply, + Destroy: false, + OutChange: &diffApply, + }, + + // EvalReduceDiff may have simplified our planned change + // into a NoOp if it only requires destroying, since destroying + // is handled by NodeDestroyResourceInstance. 
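+			// (For example, a planned replacement is reduced here to just
+			// its create step, since the paired destroy is handled by the
+			// separate NodeDestroyResourceInstance for this instance.)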
+ &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply == nil || diffApply.Action == plans.NoOp { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + // Call pre-apply hook + &EvalApplyPre{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + }, + &EvalApply{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + State: &state, + Change: &diffApply, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, + CreateNew: &createNew, + }, + &EvalMaybeTainted{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + Error: &err, + StateOutput: &state, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalApplyProvisioners{ + Addr: addr.Resource, + State: &state, // EvalApplyProvisioners will skip if already tainted + ResourceConfig: n.Config, + CreateNew: &createNew, + Error: &err, + When: configs.ProvisionerWhenCreate, + }, + &EvalMaybeTainted{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + Error: &err, + StateOutput: &state, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return createBeforeDestroyEnabled && err != nil, nil + }, + Then: &EvalMaybeRestoreDeposedObject{ + Addr: addr.Resource, + Key: &deposedKey, + }, + }, + + // We clear the diff out here so that future nodes + // don't see a diff that is already complete. There + // is no longer a diff! + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if !diff.Action.IsReplace() { + return true, nil + } + if !n.createBeforeDestroy() { + return true, nil + } + return false, nil + }, + Then: &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: nil, + }, + }, + + &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go index 657bbee7f5..ca2267e479 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go @@ -2,81 +2,114 @@ package terraform import ( "fmt" + "log" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" ) -// NodeDestroyResource represents a resource that is to be destroyed. -type NodeDestroyResource struct { - *NodeAbstractResource +// NodeDestroyResourceInstance represents a resource instance that is to be +// destroyed. +type NodeDestroyResourceInstance struct { + *NodeAbstractResourceInstance + + // If DeposedKey is set to anything other than states.NotDeposed then + // this node destroys a deposed object of the associated instance + // rather than its current object. 
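+	//
+	// Deposed objects arise, for example, when a create_before_destroy
+	// replacement creates its new object first: the old object is "deposed"
+	// under a generated key so that a node like this one can destroy it
+	// separately from the instance's current object.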
+ DeposedKey states.DeposedKey + + CreateBeforeDestroyOverride *bool } -func (n *NodeDestroyResource) Name() string { - return n.NodeAbstractResource.Name() + " (destroy)" +var ( + _ GraphNodeResource = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil) +) + +func (n *NodeDestroyResourceInstance) Name() string { + if n.DeposedKey != states.NotDeposed { + return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) + } + return n.ResourceInstanceAddr().String() + " (destroy)" } // GraphNodeDestroyer -func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress { - return n.Addr +func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr } // GraphNodeDestroyerCBD -func (n *NodeDestroyResource) CreateBeforeDestroy() bool { +func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool { + if n.CreateBeforeDestroyOverride != nil { + return *n.CreateBeforeDestroyOverride + } + // If we have no config, we just assume no - if n.Config == nil { + if n.Config == nil || n.Config.Managed == nil { return false } - return n.Config.Lifecycle.CreateBeforeDestroy + return n.Config.Managed.CreateBeforeDestroy } // GraphNodeDestroyerCBD -func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error { - // If we have no config, do nothing since it won't affect the - // create step anyways. - if n.Config == nil { - return nil - } - - // Set CBD to true - n.Config.Lifecycle.CreateBeforeDestroy = true - +func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error { + n.CreateBeforeDestroyOverride = &v return nil } // GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResource) ReferenceableName() []string { - // We modify our referenceable name to have the suffix of ".destroy" - // since depending on the creation side doesn't necessarilly mean - // depending on destruction. - suffix := ".destroy" - - // If we're CBD, we also append "-cbd". This is because CBD will setup - // its own edges (in CBDEdgeTransformer). Depending on the "destroy" - // side generally doesn't mean depending on CBD as well. 
See GH-11349 +func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable { + normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs() + destroyAddrs := make([]addrs.Referenceable, len(normalAddrs)) + + phaseType := addrs.ResourceInstancePhaseDestroy if n.CreateBeforeDestroy() { - suffix += "-cbd" + phaseType = addrs.ResourceInstancePhaseDestroyCBD } - result := n.NodeAbstractResource.ReferenceableName() - for i, v := range result { - result[i] = v + suffix + for i, normalAddr := range normalAddrs { + switch ta := normalAddr.(type) { + case addrs.Resource: + destroyAddrs[i] = ta.Phase(phaseType) + case addrs.ResourceInstance: + destroyAddrs[i] = ta.Phase(phaseType) + default: + destroyAddrs[i] = normalAddr + } } - return result + return destroyAddrs } // GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResource) References() []string { +func (n *NodeDestroyResourceInstance) References() []*addrs.Reference { // If we have a config, then we need to include destroy-time dependencies - if c := n.Config; c != nil { - var result []string - for _, p := range c.Provisioners { - // We include conn info and config for destroy time provisioners - // as dependencies that we have. - if p.When == config.ProvisionerWhenDestroy { - result = append(result, ReferencesFromConfig(p.ConnInfo)...) - result = append(result, ReferencesFromConfig(p.RawConfig)...) + if c := n.Config; c != nil && c.Managed != nil { + var result []*addrs.Reference + + // We include conn info and config for destroy time provisioners + // as dependencies that we have. + for _, p := range c.Managed.Provisioners { + schema := n.ProvisionerSchemas[p.Type] + + if p.When == configs.ProvisionerWhenDestroy { + if p.Connection != nil { + result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...) + } + result = append(result, ReferencesFromConfig(p.Config, schema)...) } } @@ -86,117 +119,66 @@ func (n *NodeDestroyResource) References() []string { return nil } -// GraphNodeDynamicExpandable -func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // If we have no config we do nothing - if n.Addr == nil { - return nil, nil - } - - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Start creating the steps - steps := make([]GraphTransformer, 0, 5) - - // We want deposed resources in the state to be destroyed - steps = append(steps, &DeposedTransformer{ - State: state, - View: n.Addr.stateId(), - ResolvedProvider: n.ResolvedProvider, - }) - - // Target - steps = append(steps, &TargetsTransformer{ - ParsedTargets: n.Targets, - }) - - // Always end with the root being added - steps = append(steps, &RootTransformer{}) - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Name: "NodeResourceDestroy", - } - return b.Build(ctx.Path()) -} - // GraphNodeEvalable -func (n *NodeDestroyResource) EvalTree() EvalNode { - // stateId is the ID to put into the state - stateId := n.Addr.stateId() - - // Build the instance info. 
More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: n.Addr.Type, - uniqueExtra: "destroy", - } - - // Build the resource for eval - addr := n.Addr - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } +func (n *NodeDestroyResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() // Get our state rs := n.ResourceState - if rs == nil { - rs = &ResourceState{ - Provider: n.ResolvedProvider, - } + var is *states.ResourceInstance + if rs != nil { + is = rs.Instance(n.InstanceKey) + } + if is == nil { + log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr) } - var diffApply *InstanceDiff - var provider ResourceProvider - var state *InstanceState + var changeApply *plans.ResourceInstanceChange + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject var err error return &EvalOpFilter{ Ops: []walkOperation{walkApply, walkDestroy}, Node: &EvalSequence{ Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + // Get the saved diff for apply &EvalReadDiff{ - Name: stateId, - Diff: &diffApply, + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &changeApply, }, - // Filter the diff so we only get the destroy - &EvalFilterDiff{ - Diff: &diffApply, - Output: &diffApply, - Destroy: true, + &EvalReduceDiff{ + Addr: addr.Resource, + InChange: &changeApply, + Destroy: true, + OutChange: &changeApply, }, - // If we're not destroying, then compare diffs + // EvalReduceDiff may have simplified our planned change + // into a NoOp if it does not require destroying. &EvalIf{ If: func(ctx EvalContext) (bool, error) { - if diffApply != nil && diffApply.GetDestroy() { - return true, nil + if changeApply == nil || changeApply.Action == plans.NoOp { + return true, EvalEarlyExitError{} } - - return true, EvalEarlyExitError{} + return true, nil }, Then: EvalNoop{}, }, - // Load the instance info so we have the module path set - &EvalInstanceInfo{Info: info}, - - &EvalGetProvider{ - Name: n.ResolvedProvider, - Output: &provider, - }, &EvalReadState{ - Name: stateId, - Output: &state, + Addr: addr.Resource, + Output: &state, + Provider: &provider, + ProviderSchema: &providerSchema, }, &EvalRequireState{ State: &state, @@ -204,15 +186,15 @@ func (n *NodeDestroyResource) EvalTree() EvalNode { // Call pre-apply hook &EvalApplyPre{ - Info: info, - State: &state, - Diff: &diffApply, + Addr: addr.Resource, + State: &state, + Change: &changeApply, }, // Run destroy provisioners if not tainted &EvalIf{ If: func(ctx EvalContext) (bool, error) { - if state != nil && state.Tainted { + if state != nil && state.Status == states.ObjectTainted { return false, nil } @@ -220,12 +202,11 @@ func (n *NodeDestroyResource) EvalTree() EvalNode { }, Then: &EvalApplyProvisioners{ - Info: info, + Addr: addr.Resource, State: &state, - Resource: n.Config, - InterpResource: resource, + ResourceConfig: n.Config, Error: &err, - When: config.ProvisionerWhenDestroy, + When: configs.ProvisionerWhenDestroy, }, }, @@ -237,7 +218,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode { }, Then: &EvalApplyPost{ - Info: info, + Addr: addr.Resource, State: &state, Error: &err, }, @@ -246,41 +227,38 @@ func (n *NodeDestroyResource) EvalTree() EvalNode { // Make sure we handle data sources properly. 
&EvalIf{ If: func(ctx EvalContext) (bool, error) { - if n.Addr == nil { - return false, fmt.Errorf("nil address") - } - - if n.Addr.Mode == config.DataResourceMode { - return true, nil - } - - return false, nil + return addr.Resource.Resource.Mode == addrs.DataResourceMode, nil }, Then: &EvalReadDataApply{ - Info: info, - Diff: &diffApply, - Provider: &provider, - Output: &state, + Addr: addr.Resource, + Config: n.Config, + Change: &changeApply, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, }, Else: &EvalApply{ - Info: info, - State: &state, - Diff: &diffApply, - Provider: &provider, - Output: &state, - Error: &err, + Addr: addr.Resource, + Config: nil, // No configuration because we are destroying + State: &state, + Change: &changeApply, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, }, }, &EvalWriteState{ - Name: stateId, - ResourceType: n.Addr.Type, - Provider: n.ResolvedProvider, - Dependencies: rs.Dependencies, - State: &state, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, }, &EvalApplyPost{ - Info: info, + Addr: addr.Resource, State: &state, Error: &err, }, @@ -289,3 +267,55 @@ func (n *NodeDestroyResource) EvalTree() EvalNode { }, } } + +// NodeDestroyResourceInstance represents a resource that is to be destroyed. +// +// Destroying a resource is a state-only operation: it is the individual +// instances being destroyed that affects remote objects. During graph +// construction, NodeDestroyResource should always depend on any other node +// related to the given resource, since it's just a final cleanup to avoid +// leaving skeleton resource objects in state after their instances have +// all been destroyed. +type NodeDestroyResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeResource = (*NodeDestroyResource)(nil) + _ GraphNodeReferenceable = (*NodeDestroyResource)(nil) + _ GraphNodeReferencer = (*NodeDestroyResource)(nil) + _ GraphNodeEvalable = (*NodeDestroyResource)(nil) +) + +func (n *NodeDestroyResource) Name() string { + return n.ResourceAddr().String() + " (clean up state)" +} + +// GraphNodeReferenceable, overriding NodeAbstractResource +func (n *NodeDestroyResource) ReferenceableAddrs() []addrs.Referenceable { + // NodeDestroyResource doesn't participate in references: the graph + // builder that created it should ensure directly that it already depends + // on every other node related to its resource, without relying on + // references. + return nil +} + +// GraphNodeReferencer, overriding NodeAbstractResource +func (n *NodeDestroyResource) References() []*addrs.Reference { + // NodeDestroyResource doesn't participate in references: the graph + // builder that created it should ensure directly that it already depends + // on every other node related to its resource, without relying on + // references. + return nil +} + +// GraphNodeEvalable +func (n *NodeDestroyResource) EvalTree() EvalNode { + // This EvalNode will produce an error if the resource isn't already + // empty by the time it is called, since it should just be pruning the + // leftover husk of a resource in state after all of the child instances + // and their objects were destroyed. 
+ return &EvalForgetResourceState{ + Addr: n.ResourceAddr().Resource, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go new file mode 100644 index 0000000000..67c46913f4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go @@ -0,0 +1,313 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) + +// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert +// an abstract resource instance to a concrete one of some type that has +// an associated deposed object key. +type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex + +type GraphNodeDeposedResourceInstanceObject interface { + DeposedInstanceObjectKey() states.DeposedKey +} + +// NodePlanDeposedResourceInstanceObject represents deposed resource +// instance objects during plan. These are distinct from the primary object +// for each resource instance since the only valid operation to do with them +// is to destroy them. +// +// This node type is also used during the refresh walk to ensure that the +// record of a deposed object is up-to-date before we plan to destroy it. +type NodePlanDeposedResourceInstanceObject struct { + *NodeAbstractResourceInstance + DeposedKey states.DeposedKey +} + +var ( + _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeResource = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeEvalable = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) +) + +func (n *NodePlanDeposedResourceInstanceObject) Name() string { + return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey) +} + +func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { + return n.DeposedKey +} + +// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { + // Deposed objects don't participate in references. + return nil +} + +// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference { + // We don't evaluate configuration for deposed objects, so they effectively + // make no references. + return nil +} + +// GraphNodeEvalable impl. +func (n *NodePlanDeposedResourceInstanceObject) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + + seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} + + // During the refresh walk we will ensure that our record of the deposed + // object is up-to-date. 
If it was already deleted outside of Terraform + // then this will remove it from state and thus avoid us planning a + // destroy for it during the subsequent plan walk. + seq.Nodes = append(seq.Nodes, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Key: n.DeposedKey, + Output: &state, + }, + &EvalRefresh{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, + }, + &EvalWriteStateDeposed{ + Addr: addr.Resource, + Key: n.DeposedKey, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + }, + }, + }) + + // During the plan walk we always produce a planned destroy change, because + // destroying is the only supported action for deposed objects. + var change *plans.ResourceInstanceChange + seq.Nodes = append(seq.Nodes, &EvalOpFilter{ + Ops: []walkOperation{walkPlan, walkPlanDestroy}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Output: &state, + Key: n.DeposedKey, + Provider: &provider, + ProviderSchema: &providerSchema, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + DeposedKey: n.DeposedKey, + State: &state, + Output: &change, + }, + &EvalWriteDiff{ + Addr: addr.Resource, + DeposedKey: n.DeposedKey, + ProviderSchema: &providerSchema, + Change: &change, + }, + // Since deposed objects cannot be referenced by expressions + // elsewhere, we don't need to also record the planned new + // state in this case. + }, + }, + }) + + return seq +} + +// NodeDestroyDeposedResourceInstanceObject represents deposed resource +// instance objects during apply. Nodes of this type are inserted by +// DiffTransformer when the planned changeset contains "delete" changes for +// deposed instance objects, and its only supported operation is to destroy +// and then forget the associated object. 
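[editor's note] The two EvalOpFilter branches above give deposed objects a deliberately narrow lifecycle: the refresh walk reconciles the stored record (dropping it if the remote object is already gone), and the plan walk then unconditionally plans a destroy for whatever remains. The standalone Go sketch below restates that flow with invented toy types (deposedObject, refreshDeposed, planDeposed); it illustrates the idea only and is not Terraform's real states/plans API.

package main

import "fmt"

// deposedObject stands in for one deposed instance object recorded in state.
type deposedObject struct {
	Key    string // analogous to states.DeposedKey, e.g. "00000001"
	Exists bool   // what the provider reported during refresh
}

// refreshDeposed mimics the refresh walk above: re-read every deposed object
// and drop the ones that were already deleted outside of Terraform, so the
// later plan walk has nothing left to destroy for them.
func refreshDeposed(objs []deposedObject) []deposedObject {
	kept := objs[:0]
	for _, o := range objs {
		if o.Exists {
			kept = append(kept, o)
		}
	}
	return kept
}

// planDeposed mimics the plan walk above: every deposed object that survived
// refresh gets exactly one planned action, and that action is always Delete,
// because destroying is the only supported operation for deposed objects.
func planDeposed(objs []deposedObject) map[string]string {
	changes := make(map[string]string)
	for _, o := range objs {
		changes[o.Key] = "Delete"
	}
	return changes
}

func main() {
	state := []deposedObject{
		{Key: "00000001", Exists: true},
		{Key: "00000002", Exists: false}, // gone already; refresh prunes it
	}
	state = refreshDeposed(state)
	fmt.Println(planDeposed(state)) // map[00000001:Delete]
}

The asymmetry is the point of the toy: refresh may shrink the set, but plan never chooses any action other than Delete.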
+type NodeDestroyDeposedResourceInstanceObject struct { + *NodeAbstractResourceInstance + DeposedKey states.DeposedKey +} + +var ( + _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeResource = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeEvalable = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) +) + +func (n *NodeDestroyDeposedResourceInstanceObject) Name() string { + return fmt.Sprintf("%s (destroy deposed %s)", n.Addr.String(), n.DeposedKey) +} + +func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { + return n.DeposedKey +} + +// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { + // Deposed objects don't participate in references. + return nil +} + +// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference { + // We don't evaluate configuration for deposed objects, so they effectively + // make no references. + return nil +} + +// GraphNodeDestroyer +func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool { + // A deposed instance is always CreateBeforeDestroy by definition, since + // we use deposed only to handle create-before-destroy. + return true +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error { + if !v { + // Should never happen: deposed instances are _always_ create_before_destroy. + return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance") + } + return nil +} + +// GraphNodeEvalable impl. 
+func (n *NodeDestroyDeposedResourceInstanceObject) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + var change *plans.ResourceInstanceChange + var err error + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Output: &state, + Key: n.DeposedKey, + Provider: &provider, + ProviderSchema: &providerSchema, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + Output: &change, + }, + // Call pre-apply hook + &EvalApplyPre{ + Addr: addr.Resource, + State: &state, + Change: &change, + }, + &EvalApply{ + Addr: addr.Resource, + Config: nil, // No configuration because we are destroying + State: &state, + Change: &change, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, + }, + // Always write the resource back to the state deposed... if it + // was successfully destroyed it will be pruned. If it was not, it will + // be caught on the next run. + &EvalWriteStateDeposed{ + Addr: addr.Resource, + Key: n.DeposedKey, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + &EvalReturnError{ + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + } +} + +// GraphNodeDeposer is an optional interface implemented by graph nodes that +// might create a single new deposed object for a specific associated resource +// instance, allowing a caller to optionally pre-allocate a DeposedKey for +// it. +type GraphNodeDeposer interface { + // SetPreallocatedDeposedKey will be called during graph construction + // if a particular node must use a pre-allocated deposed key if/when it + // "deposes" the current object of its associated resource instance. + SetPreallocatedDeposedKey(key states.DeposedKey) +} + +// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer. +// Embed it in a node type to get automatic support for it, and then access +// the field PreallocatedDeposedKey to access any pre-allocated key. +type graphNodeDeposer struct { + PreallocatedDeposedKey states.DeposedKey +} + +func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) { + n.PreallocatedDeposedKey = key +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go index 1afae7a048..633c1c4662 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go @@ -1,47 +1,119 @@ package terraform import ( + "log" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) // NodePlannableResource represents a resource that is "plannable": // it is ready to be planned in order to create a diff. type NodePlannableResource struct { - *NodeAbstractCountResource + *NodeAbstractResource + + // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD + // during graph construction, if dependencies require us to force this + // on regardless of what the configuration says. 
+ ForceCreateBeforeDestroy *bool +} + +var ( + _ GraphNodeSubPath = (*NodePlannableResource)(nil) + _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil) + _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil) + _ GraphNodeReferenceable = (*NodePlannableResource)(nil) + _ GraphNodeReferencer = (*NodePlannableResource)(nil) + _ GraphNodeResource = (*NodePlannableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil) +) + +// GraphNodeEvalable +func (n *NodePlannableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config + + if config == nil { + // Nothing to do, then. + log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) + return &EvalNoop{} + } + + // this ensures we can reference the resource even if the count is 0 + return &EvalWriteResourceState{ + Addr: addr.Resource, + Config: config, + ProviderAddr: n.ResolvedProvider, + } +} + +// GraphNodeDestroyerCBD +func (n *NodePlannableResource) CreateBeforeDestroy() bool { + if n.ForceCreateBeforeDestroy != nil { + return *n.ForceCreateBeforeDestroy + } + + // If we have no config, we just assume no + if n.Config == nil || n.Config.Managed == nil { + return false + } + + return n.Config.Managed.CreateBeforeDestroy +} + +// GraphNodeDestroyerCBD +func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error { + n.ForceCreateBeforeDestroy = &v + return nil } // GraphNodeDynamicExpandable func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count, err := n.Config.Count() - if err != nil { - return nil, err + var diags tfdiags.Diagnostics + + count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() } + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. + state := ctx.State().Lock() + defer ctx.State().Unlock() + // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config a.ResolvedProvider = n.ResolvedProvider + a.Schema = n.Schema + a.ProvisionerSchemas = n.ProvisionerSchemas return &NodePlannableResourceInstance{ - NodeAbstractResource: a, + NodeAbstractResourceInstance: a, + + // By the time we're walking, we've figured out whether we need + // to force on CreateBeforeDestroy due to dependencies on other + // nodes that have it. 
+ ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), } } - // The concrete resource factory we'll use for oprhans - concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex { + // The concrete resource factory we'll use for orphans + concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config a.ResolvedProvider = n.ResolvedProvider + a.Schema = n.Schema + a.ProvisionerSchemas = n.ProvisionerSchemas - return &NodePlannableResourceOrphan{ - NodeAbstractResource: a, + return &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: a, } } @@ -50,6 +122,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { // Expand the count. &ResourceCountTransformer{ Concrete: concreteResource, + Schema: n.Schema, Count: count, Addr: n.ResourceAddr(), }, @@ -66,7 +139,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { &AttachStateTransformer{State: state}, // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, + &TargetsTransformer{Targets: n.Targets}, // Connect references so ordering is correct &ReferenceTransformer{}, @@ -81,5 +154,6 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { Validate: true, Name: "NodePlannableResource", } - return b.Build(ctx.Path()) + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go index 9b02362b6f..38746f0d35 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go @@ -1,52 +1,87 @@ package terraform -// NodePlanDestroyableResource represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. -type NodePlanDestroyableResource struct { - *NodeAbstractResource +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) + +// NodePlanDestroyableResourceInstance represents a resource that is ready +// to be planned for destruction. 
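[editor's note] Both NodePlannableResource (ForceCreateBeforeDestroy) and NodeDestroyResourceInstance (CreateBeforeDestroyOverride) above use a *bool rather than a plain bool, so that "no override" is distinguishable from an explicit false, and so the override no longer has to be written back into shared configuration the way the old ModifyCreateBeforeDestroy did. A minimal self-contained sketch of the pattern, with invented stand-in types (node, resourceConfig):

package main

import "fmt"

type resourceConfig struct {
	CreateBeforeDestroy bool
}

type node struct {
	// nil means "no override: defer to configuration"; non-nil pins the
	// answer regardless of what the configuration says.
	forceCBD *bool
	config   *resourceConfig
}

func (n *node) CreateBeforeDestroy() bool {
	if n.forceCBD != nil {
		return *n.forceCBD
	}
	if n.config == nil {
		return false // no config: assume the default destroy-then-create order
	}
	return n.config.CreateBeforeDestroy
}

// ModifyCreateBeforeDestroy records the override locally instead of mutating
// shared configuration, which is what the pre-0.12 implementation did.
func (n *node) ModifyCreateBeforeDestroy(v bool) error {
	n.forceCBD = &v
	return nil
}

func main() {
	n := &node{config: &resourceConfig{CreateBeforeDestroy: false}}
	fmt.Println(n.CreateBeforeDestroy()) // false, taken from config
	n.ModifyCreateBeforeDestroy(true)
	fmt.Println(n.CreateBeforeDestroy()) // true, override wins
}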
+type NodePlanDestroyableResourceInstance struct { + *NodeAbstractResourceInstance } +var ( + _ GraphNodeSubPath = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeResource = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil) +) + // GraphNodeDestroyer -func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress { - return n.Addr +func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr } // GraphNodeEvalable -func (n *NodePlanDestroyableResource) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr +func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() - // stateId is the ID to put into the state - stateId := addr.stateId() + // Declare a bunch of variables that are used for state during + // evaluation. These are written to by address in the EvalNodes we + // declare below. + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, + if n.ResolvedProvider.ProviderConfig.Type == "" { + // Should never happen; indicates that the graph was not constructed + // correctly since we didn't get our provider attached. + panic(fmt.Sprintf("%T %q was not assigned a resolved provider", n, dag.VertexName(n))) } - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. 
- var diff *InstanceDiff - var state *InstanceState - return &EvalSequence{ Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, &EvalReadState{ - Name: stateId, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Output: &state, }, &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + Output: &change, }, &EvalCheckPreventDestroy{ - Resource: n.Config, - Diff: &diff, + Addr: addr.Resource, + Config: n.Config, + Change: &change, }, &EvalWriteDiff{ - Name: stateId, - Diff: &diff, + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, }, }, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go index 7d9fcddb55..d69472f820 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go @@ -3,187 +3,182 @@ package terraform import ( "fmt" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" ) // NodePlannableResourceInstance represents a _single_ resource // instance that is plannable. This means this represents a single // count index, for example. type NodePlannableResourceInstance struct { - *NodeAbstractResource + *NodeAbstractResourceInstance + ForceCreateBeforeDestroy bool } +var ( + _ GraphNodeSubPath = (*NodePlannableResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil) + _ GraphNodeResource = (*NodePlannableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil) + _ GraphNodeEvalable = (*NodePlannableResourceInstance)(nil) +) + // GraphNodeEvalable func (n *NodePlannableResourceInstance) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() + addr := n.ResourceInstanceAddr() - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - ModulePath: normalizeModulePath(addr.Path), - } - - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } + // State still uses legacy-style internal ids, so we need to shim to get + // a suitable key to use. + stateId := NewLegacyResourceInstanceAddress(addr).stateId() // Determine the dependencies for the state. 
stateDeps := n.StateReferences() // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case config.ManagedResourceMode: - return n.evalTreeManagedResource( - stateId, info, resource, stateDeps, - ) - case config.DataResourceMode: - return n.evalTreeDataResource( - stateId, info, resource, stateDeps) + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + return n.evalTreeManagedResource(addr, stateId, stateDeps) + case addrs.DataResourceMode: + return n.evalTreeDataResource(addr, stateId, stateDeps) default: panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) } } -func (n *NodePlannableResourceInstance) evalTreeDataResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - var provider ResourceProvider - var config *ResourceConfig - var diff *InstanceDiff - var state *InstanceState +func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode { + config := n.Config + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var configVal cty.Value return &EvalSequence{ Nodes: []EvalNode{ - &EvalReadState{ - Name: stateId, - Output: &state, + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, }, - // We need to re-interpolate the config here because some - // of the attributes may have become computed during - // earlier planning, due to other resources having - // "requires new resource" diffs. - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, }, + // If we already have a non-planned state then we already dealt + // with this during the refresh walk and so we have nothing to do + // here. &EvalIf{ If: func(ctx EvalContext) (bool, error) { - computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0 - - // If the configuration is complete and we - // already have a state then we don't need to - // do any further work during apply, because we - // already populated the state during refresh. 
- if !computed && state != nil { + if state != nil && state.Status != states.ObjectPlanned { return true, EvalEarlyExitError{} } - return true, nil }, Then: EvalNoop{}, }, - &EvalGetProvider{ - Name: n.ResolvedProvider, - Output: &provider, + &EvalValidateSelfRef{ + Addr: addr.Resource, + Config: config.Config, + ProviderSchema: &providerSchema, }, - &EvalReadDataDiff{ - Info: info, - Config: &config, - Provider: &provider, - Output: &diff, - OutputState: &state, + &EvalReadData{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + ForcePlanRead: true, // _always_ produce a Read change, even if the config seems ready + OutputChange: &change, + OutputValue: &configVal, + OutputState: &state, }, &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.ResolvedProvider, - Dependencies: stateDeps, - State: &state, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, }, &EvalWriteDiff{ - Name: stateId, - Diff: &diff, + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, }, }, } } -func (n *NodePlannableResourceInstance) evalTreeManagedResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider ResourceProvider - var diff *InstanceDiff - var state *InstanceState - var resourceConfig *ResourceConfig +func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode { + config := n.Config + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject return &EvalSequence{ Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }, &EvalGetProvider{ - Name: n.ResolvedProvider, + Addr: n.ResolvedProvider, Output: &provider, + Schema: &providerSchema, }, - // Re-run validation to catch any errors we missed, e.g. type - // mismatches on computed values. 
- &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, - IgnoreWarnings: true, - }, + &EvalReadState{ - Name: stateId, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Output: &state, }, + + &EvalValidateSelfRef{ + Addr: addr.Resource, + Config: config.Config, + ProviderSchema: &providerSchema, + }, + &EvalDiff{ - Name: stateId, - Info: info, - Config: &resourceConfig, - Resource: n.Config, - Provider: &provider, - State: &state, - OutputDiff: &diff, - OutputState: &state, + Addr: addr.Resource, + Config: n.Config, + CreateBeforeDestroy: n.ForceCreateBeforeDestroy, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + OutputChange: &change, + OutputState: &state, }, &EvalCheckPreventDestroy{ - Resource: n.Config, - Diff: &diff, + Addr: addr.Resource, + Config: n.Config, + Change: &change, }, &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.ResolvedProvider, - Dependencies: stateDeps, - State: &state, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + ProviderSchema: &providerSchema, }, &EvalWriteDiff{ - Name: stateId, - Diff: &diff, + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, }, }, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go index 73d6e41f54..841669491a 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go @@ -1,53 +1,83 @@ package terraform -// NodePlannableResourceOrphan represents a resource that is "applyable": +import ( + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) + +// NodePlannableResourceInstanceOrphan represents a resource that is "applyable": // it is ready to be applied and is represented by a diff. -type NodePlannableResourceOrphan struct { - *NodeAbstractResource +type NodePlannableResourceInstanceOrphan struct { + *NodeAbstractResourceInstance } -func (n *NodePlannableResourceOrphan) Name() string { - return n.NodeAbstractResource.Name() + " (orphan)" -} +var ( + _ GraphNodeSubPath = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeResource = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) +) -// GraphNodeEvalable -func (n *NodePlannableResourceOrphan) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr +var ( + _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) +) - // stateId is the ID to put into the state - stateId := addr.stateId() +func (n *NodePlannableResourceInstanceOrphan) Name() string { + return n.ResourceInstanceAddr().String() + " (orphan)" +} - // Build the instance info. 
More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - ModulePath: normalizeModulePath(addr.Path), - } +// GraphNodeEvalable +func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() // Declare a bunch of variables that are used for state during // evaluation. Most of this are written to by-address below. - var diff *InstanceDiff - var state *InstanceState + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var provider providers.Interface + var providerSchema *ProviderSchema return &EvalSequence{ Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, &EvalReadState{ - Name: stateId, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Output: &state, }, &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, + Addr: addr.Resource, + State: &state, + ProviderAddr: n.ResolvedProvider, + Output: &change, + OutputState: &state, // Will point to a nil state after this complete, signalling destroyed }, &EvalCheckPreventDestroy{ - Resource: n.Config, - ResourceId: stateId, - Diff: &diff, + Addr: addr.Resource, + Config: n.Config, + Change: &change, }, &EvalWriteDiff{ - Name: stateId, - Diff: &diff, + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, }, }, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go index 697bd4942b..9506023206 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go @@ -2,38 +2,60 @@ package terraform import ( "fmt" + "log" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + + "github.com/hashicorp/terraform/states" + + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) // NodeRefreshableManagedResource represents a resource that is expanabled into // NodeRefreshableManagedResourceInstance. Resource count orphans are also added. 
type NodeRefreshableManagedResource struct { - *NodeAbstractCountResource + *NodeAbstractResource } +var ( + _ GraphNodeSubPath = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeDynamicExpandable = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeReferencer = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeResource = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil) +) + // GraphNodeDynamicExpandable func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count, err := n.Config.Count() - if err != nil { - return nil, err + var diags tfdiags.Diagnostics + + count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() } + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. + state := ctx.State().Lock() + defer ctx.State().Unlock() + // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config a.ResolvedProvider = n.ResolvedProvider return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResource: a, + NodeAbstractResourceInstance: a, } } @@ -42,6 +64,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, // Expand the count. &ResourceCountTransformer{ Concrete: concreteResource, + Schema: n.Schema, Count: count, Addr: n.ResourceAddr(), }, @@ -59,7 +82,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, &AttachStateTransformer{State: state}, // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, + &TargetsTransformer{Targets: n.Targets}, // Connect references so ordering is correct &ReferenceTransformer{}, @@ -75,66 +98,76 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, Name: "NodeRefreshableManagedResource", } - return b.Build(ctx.Path()) + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() } // NodeRefreshableManagedResourceInstance represents a resource that is "applyable": // it is ready to be applied and is represented by a diff. 
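[editor's note] DynamicExpand above (and its twin on NodePlannableResource earlier in this patch) builds its subgraph the same way each time: evaluate count, then run a list of small single-purpose GraphTransformers through a BasicGraphBuilder. The toy sketch below restates that builder/transformer shape with an invented graph type rather than Terraform's dag package; addNode and build are stand-ins, not real API.

package main

import "fmt"

// graph is a toy stand-in for terraform's Graph.
type graph struct {
	nodes []string
}

// transformer mirrors the role of the GraphTransformer interface: each step
// mutates the graph in place and may fail.
type transformer interface {
	Transform(g *graph) error
}

// addNode is a trivial step standing in for the real transformers
// (ResourceCountTransformer, AttachStateTransformer, RootTransformer, ...).
type addNode struct{ name string }

func (t addNode) Transform(g *graph) error {
	g.nodes = append(g.nodes, t.name)
	return nil
}

// build mirrors BasicGraphBuilder.Build: run every step in order and stop at
// the first error, so later steps can rely on the work of earlier ones.
func build(steps []transformer) (*graph, error) {
	g := &graph{}
	for _, step := range steps {
		if err := step.Transform(g); err != nil {
			return nil, fmt.Errorf("graph transform failed: %w", err)
		}
	}
	return g, nil
}

func main() {
	g, err := build([]transformer{
		addNode{name: "aws_instance.example[0]"}, // like an expanded count instance
		addNode{name: "root"},                    // like RootTransformer's single root
	})
	fmt.Println(g, err)
}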
type NodeRefreshableManagedResourceInstance struct { - *NodeAbstractResource + *NodeAbstractResourceInstance } +var ( + _ GraphNodeSubPath = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeResource = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil) +) + // GraphNodeDestroyer -func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *ResourceAddress { - return n.Addr +func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr } // GraphNodeEvalable func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + // Eval info is different depending on what kind of resource this is - switch mode := n.Addr.Mode; mode { - case config.ManagedResourceMode: + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: if n.ResourceState == nil { + log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s has no existing state to refresh", addr) return n.evalTreeManagedResourceNoState() } + log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s will be refreshed", addr) return n.evalTreeManagedResource() - case config.DataResourceMode: + case addrs.DataResourceMode: // Get the data source node. If we don't have a configuration // then it is an orphan so we destroy it (remove it from the state). var dn GraphNodeEvalable if n.Config != nil { dn = &NodeRefreshableDataResourceInstance{ - NodeAbstractResource: n.NodeAbstractResource, + NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, } } else { - dn = &NodeDestroyableDataResource{ - NodeAbstractResource: n.NodeAbstractResource, + dn = &NodeDestroyableDataResourceInstance{ + NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, } } return dn.EvalTree() default: - panic(fmt.Errorf("unsupported resource mode %s", mode)) + panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode)) } } func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - } + addr := n.ResourceInstanceAddr() // Declare a bunch of variables that are used for state during // evaluation. Most of this are written to by-address below. - var provider ResourceProvider - var state *InstanceState + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject // This happened during initial development. All known cases were // fixed and tested but as a sanity check let's assert here. 
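[editor's note] The "declare a bunch of variables" comment above describes a convention used throughout these EvalTree implementations: locals such as provider, providerSchema and state are declared once, and every EvalNode in the sequence reads and writes them through pointers, so each step sees the previous step's output. A self-contained toy restating the pattern; evalNode, readState, refresh and writeState are invented names, not the real eval types.

package main

import "fmt"

// evalNode is a toy stand-in for EvalNode; real nodes also receive an
// EvalContext, omitted here.
type evalNode interface {
	Eval() error
}

// readState writes its result through a pointer, like EvalReadState's Output.
type readState struct{ out *string }

func (e readState) Eval() error { *e.out = "stale object"; return nil }

// refresh reads and rewrites the same variable, like EvalRefresh taking
// State and Output that both point at the shared &state slot.
type refresh struct{ inOut *string }

func (e refresh) Eval() error { *e.inOut = "fresh object"; return nil }

// writeState only reads, like EvalWriteState persisting the final value.
type writeState struct{ in *string }

func (e writeState) Eval() error { fmt.Println("persisting:", *e.in); return nil }

func main() {
	var state string // the shared slot, like `var state *states.ResourceInstanceObject`
	seq := []evalNode{
		readState{out: &state},
		refresh{inOut: &state},
		writeState{in: &state},
	}
	for _, n := range seq {
		if err := n.Eval(); err != nil {
			fmt.Println("eval failed:", err)
			return
		}
	}
}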
@@ -150,25 +183,33 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ResolvedProvider, + Addr: n.ResolvedProvider, Output: &provider, + Schema: &providerSchema, }, + &EvalReadState{ - Name: stateId, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Output: &state, }, + &EvalRefresh{ - Info: info, - Provider: &provider, - State: &state, - Output: &state, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, }, + &EvalWriteState{ - Name: stateId, - ResourceType: n.ResourceState.Type, - Provider: n.ResolvedProvider, - Dependencies: n.ResourceState.Dependencies, - State: &state, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, }, }, } @@ -186,80 +227,62 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN // plan, but nothing is done with the diff after it is created - it is dropped, // and its changes are not counted in the UI. func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { + addr := n.ResourceInstanceAddr() + // Declare a bunch of variables that are used for state during // evaluation. Most of this are written to by-address below. - var provider ResourceProvider - var state *InstanceState - var resourceConfig *ResourceConfig - - addr := n.NodeAbstractResource.Addr - stateID := addr.stateId() - info := &InstanceInfo{ - Id: stateID, - Type: addr.Type, - ModulePath: normalizeModulePath(addr.Path), - } - - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } - - // Determine the dependencies for the state. - stateDeps := n.StateReferences() - - // n.Config can be nil if the config and state don't match - var raw *config.RawConfig - if n.Config != nil { - raw = n.Config.RawConfig.Copy() - } + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject return &EvalSequence{ Nodes: []EvalNode{ - &EvalInterpolate{ - Config: raw, - Resource: resource, - Output: &resourceConfig, - }, &EvalGetProvider{ - Name: n.ResolvedProvider, + Addr: n.ResolvedProvider, Output: &provider, + Schema: &providerSchema, }, - // Re-run validation to catch any errors we missed, e.g. type - // mismatches on computed values. 
- &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, - IgnoreWarnings: true, - }, + &EvalReadState{ - Name: stateID, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Output: &state, }, + &EvalDiff{ - Name: stateID, - Info: info, - Config: &resourceConfig, - Resource: n.Config, - Provider: &provider, - State: &state, - OutputState: &state, - Stub: true, + Addr: addr.Resource, + Config: n.Config, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + OutputChange: &change, + OutputState: &state, + Stub: true, }, + &EvalWriteState{ - Name: stateID, - ResourceType: n.Config.Type, - Provider: n.ResolvedProvider, - Dependencies: stateDeps, - State: &state, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + + // We must also save the planned change, so that expressions in + // other nodes, such as provider configurations and data resources, + // can work with the planned new value. + // + // This depends on the fact that Context.Refresh creates a + // temporary new empty changeset for the duration of its graph + // walk, and so this recorded change will be discarded immediately + // after the refresh walk completes. + &EvalWriteDiff{ + Addr: addr.Resource, + Change: &change, + ProviderSchema: &providerSchema, }, }, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go index 0df223d9ea..734ec9e2b7 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go @@ -1,158 +1,87 @@ package terraform import ( - "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/zclconf/go-cty/cty" ) // NodeValidatableResource represents a resource that is used for validation // only. type NodeValidatableResource struct { - *NodeAbstractCountResource -} - -// GraphNodeEvalable -func (n *NodeValidatableResource) EvalTree() EvalNode { - // Ensure we're validating - c := n.NodeAbstractCountResource - c.Validate = true - return c.EvalTree() -} - -// GraphNodeDynamicExpandable -func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count := 1 - if n.Config.RawCount.Value() != unknownValue() { - var err error - count, err = n.Config.Count() - if err != nil { - return nil, err - } - } - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - - return &NodeValidatableResourceInstance{ - NodeAbstractResource: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. 
- &ResourceCountTransformer{ - Concrete: concreteResource, - Count: count, - Addr: n.ResourceAddr(), - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeValidatableResource", - } - - return b.Build(ctx.Path()) -} - -// This represents a _single_ resource instance to validate. -type NodeValidatableResourceInstance struct { *NodeAbstractResource } -// GraphNodeEvalable -func (n *NodeValidatableResourceInstance) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr +var ( + _ GraphNodeSubPath = (*NodeValidatableResource)(nil) + _ GraphNodeEvalable = (*NodeValidatableResource)(nil) + _ GraphNodeReferenceable = (*NodeValidatableResource)(nil) + _ GraphNodeReferencer = (*NodeValidatableResource)(nil) + _ GraphNodeResource = (*NodeValidatableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil) +) - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } +// GraphNodeEvalable +func (n *NodeValidatableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var config *ResourceConfig - var provider ResourceProvider + // Declare the variables will be used are used to pass values along + // the evaluation sequence below. These are written to via pointers + // passed to the EvalNodes. 
+ var provider providers.Interface + var providerSchema *ProviderSchema + var configVal cty.Value seq := &EvalSequence{ Nodes: []EvalNode{ - &EvalValidateResourceSelfRef{ - Addr: &addr, - Config: &n.Config.RawConfig, - }, &EvalGetProvider{ - Name: n.ResolvedProvider, + Addr: n.ResolvedProvider, Output: &provider, - }, - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, + Schema: &providerSchema, }, &EvalValidateResource{ - Provider: &provider, - Config: &config, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Config: config, + ConfigVal: &configVal, }, }, } - // Validate all the provisioners - for _, p := range n.Config.Provisioners { - var provisioner ResourceProvisioner - var connConfig *ResourceConfig - seq.Nodes = append( - seq.Nodes, - &EvalGetProvisioner{ - Name: p.Type, - Output: &provisioner, - }, - &EvalInterpolate{ - Config: p.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - &EvalInterpolate{ - Config: p.ConnInfo.Copy(), - Resource: resource, - Output: &connConfig, - }, - &EvalValidateProvisioner{ - Provisioner: &provisioner, - Config: &config, - ConnConfig: &connConfig, - }, - ) + if managed := n.Config.Managed; managed != nil { + hasCount := n.Config.Count != nil + + // Validate all the provisioners + for _, p := range managed.Provisioners { + var provisioner provisioners.Interface + var provisionerSchema *configschema.Block + + if p.Connection == nil { + p.Connection = config.Managed.Connection + } else if config.Managed.Connection != nil { + p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config) + } + + seq.Nodes = append( + seq.Nodes, + &EvalGetProvisioner{ + Name: p.Type, + Output: &provisioner, + Schema: &provisionerSchema, + }, + &EvalValidateProvisioner{ + ResourceAddr: addr.Resource, + Provisioner: &provisioner, + Schema: &provisionerSchema, + Config: p, + ResourceHasCount: hasCount, + }, + ) + } } return seq diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go index cb61a4e3a6..1c302903d7 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go @@ -1,22 +1,44 @@ package terraform import ( - "fmt" - - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" ) // NodeRootVariable represents a root variable input. type NodeRootVariable struct { - Config *config.Variable + Addr addrs.InputVariable + Config *configs.Variable } +var ( + _ GraphNodeSubPath = (*NodeRootVariable)(nil) + _ GraphNodeReferenceable = (*NodeRootVariable)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) +) + func (n *NodeRootVariable) Name() string { - result := fmt.Sprintf("var.%s", n.Config.Name) - return result + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeRootVariable) Path() addrs.ModuleInstance { + return addrs.RootModuleInstance } // GraphNodeReferenceable -func (n *NodeRootVariable) ReferenceableName() []string { - return []string{n.Name()} +func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr} +} + +// dag.GraphNodeDotter impl. 
+func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go index 51dd4122bb..9757446bb6 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/path.go +++ b/vendor/github.com/hashicorp/terraform/terraform/path.go @@ -1,10 +1,17 @@ package terraform import ( - "strings" + "fmt" + + "github.com/hashicorp/terraform/addrs" ) -// PathCacheKey returns a cache key for a module path. -func PathCacheKey(path []string) string { - return strings.Join(path, "|") +// PathObjectCacheKey is like PathCacheKey but includes an additional name +// to be included in the key, for module-namespaced objects. +// +// The result of this function is guaranteed unique for any distinct pair +// of path and name, but is not guaranteed to be in any particular format +// and in particular should never be shown to end-users. +func PathObjectCacheKey(path addrs.ModuleInstance, objectName string) string { + return fmt.Sprintf("%s|%s", path.String(), objectName) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go index 30db19504d..af04c6cd46 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go @@ -3,14 +3,13 @@ package terraform import ( "bytes" "encoding/gob" - "errors" "fmt" "io" - "log" "sync" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/version" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs" ) func init() { @@ -31,9 +30,9 @@ type Plan struct { // plan is applied. Diff *Diff - // Module represents the entire configuration that was present when this + // Config represents the entire configuration that was present when this // plan was created. - Module *module.Tree + Config *configs.Config // State is the Terraform state that was current when this plan was // created. @@ -44,7 +43,7 @@ type Plan struct { // Vars retains the variables that were set when creating the plan, so // that the same variables can be applied during apply. - Vars map[string]interface{} + Vars map[string]cty.Value // Targets, if non-empty, contains a set of resource address strings that // identify graph nodes that were selected as targets for plan. @@ -78,64 +77,6 @@ type Plan struct { once sync.Once } -// Context returns a Context with the data encapsulated in this plan. -// -// The following fields in opts are overridden by the plan: Config, -// Diff, Variables. -// -// If State is not provided, it is set from the plan. If it _is_ provided, -// it must be Equal to the state stored in plan, but may have a newer -// serial. -func (p *Plan) Context(opts *ContextOpts) (*Context, error) { - var err error - opts, err = p.contextOpts(opts) - if err != nil { - return nil, err - } - return NewContext(opts) -} - -// contextOpts mutates the given base ContextOpts in place to use input -// objects obtained from the receiving plan. 
-func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) { - opts := base - - opts.Diff = p.Diff - opts.Module = p.Module - opts.Targets = p.Targets - opts.ProviderSHA256s = p.ProviderSHA256s - opts.Destroy = p.Destroy - - if opts.State == nil { - opts.State = p.State - } else if !opts.State.Equal(p.State) { - // Even if we're overriding the state, it should be logically equal - // to what's in plan. The only valid change to have made by the time - // we get here is to have incremented the serial. - // - // Due to the fact that serialization may change the representation of - // the state, there is little chance that these aren't actually equal. - // Log the error condition for reference, but continue with the state - // we have. - log.Println("[WARN] Plan state and ContextOpts state are not equal") - } - - thisVersion := version.String() - if p.TerraformVersion != "" && p.TerraformVersion != thisVersion { - return nil, fmt.Errorf( - "plan was created with a different version of Terraform (created with %s, but running %s)", - p.TerraformVersion, thisVersion, - ) - } - - opts.Variables = make(map[string]interface{}) - for k, v := range p.Vars { - opts.Variables[k] = v - } - - return opts, nil -} - func (p *Plan) String() string { buf := new(bytes.Buffer) buf.WriteString("DIFF:\n\n") @@ -158,7 +99,7 @@ func (p *Plan) init() { } if p.Vars == nil { - p.Vars = make(map[string]interface{}) + p.Vars = make(map[string]cty.Value) } }) } @@ -172,63 +113,10 @@ const planFormatVersion byte = 2 // ReadPlan reads a plan structure out of a reader in the format that // was written by WritePlan. func ReadPlan(src io.Reader) (*Plan, error) { - var result *Plan - var err error - n := 0 - - // Verify the magic bytes - magic := make([]byte, len(planFormatMagic)) - for n < len(magic) { - n, err = src.Read(magic[n:]) - if err != nil { - return nil, fmt.Errorf("error while reading magic bytes: %s", err) - } - } - if string(magic) != planFormatMagic { - return nil, fmt.Errorf("not a valid plan file") - } - - // Verify the version is something we can read - var formatByte [1]byte - n, err = src.Read(formatByte[:]) - if err != nil { - return nil, err - } - if n != len(formatByte) { - return nil, errors.New("failed to read plan version byte") - } - - if formatByte[0] != planFormatVersion { - return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0]) - } - - dec := gob.NewDecoder(src) - if err := dec.Decode(&result); err != nil { - return nil, err - } - - return result, nil + return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead") } // WritePlan writes a plan somewhere in a binary format. 
func WritePlan(d *Plan, dst io.Writer) error { - // Write the magic bytes so we can determine the file format later - n, err := dst.Write([]byte(planFormatMagic)) - if err != nil { - return err - } - if n != len(planFormatMagic) { - return errors.New("failed to write plan format magic bytes") - } - - // Write a version byte so we can iterate on version at some point - n, err = dst.Write([]byte{planFormatVersion}) - if err != nil { - return err - } - if n != 1 { - return errors.New("failed to write plan version byte") - } - - return gob.NewEncoder(dst).Encode(d) + return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead") } diff --git a/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go new file mode 100644 index 0000000000..8c58240895 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go @@ -0,0 +1,496 @@ +package terraform + +import ( + "encoding/json" + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" +) + +var _ providers.Interface = (*MockProvider)(nil) + +// MockProvider implements providers.Interface but mocks out all the +// calls for testing purposes. +type MockProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetSchemaCalled bool + GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests + + PrepareProviderConfigCalled bool + PrepareProviderConfigResponse providers.PrepareProviderConfigResponse + PrepareProviderConfigRequest providers.PrepareProviderConfigRequest + PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse + + ValidateResourceTypeConfigCalled bool + ValidateResourceTypeConfigTypeName string + ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse + ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest + ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse + + ValidateDataSourceConfigCalled bool + ValidateDataSourceConfigTypeName string + ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse + ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest + ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse + + UpgradeResourceStateCalled bool + UpgradeResourceStateTypeName string + UpgradeResourceStateResponse providers.UpgradeResourceStateResponse + UpgradeResourceStateRequest providers.UpgradeResourceStateRequest + UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse + + ConfigureCalled bool + ConfigureResponse providers.ConfigureResponse + ConfigureRequest providers.ConfigureRequest + ConfigureNewFn func(providers.ConfigureRequest) providers.ConfigureResponse // Named ConfigureNewFn so we can still have the legacy ConfigureFn declared below + + StopCalled bool + StopFn func() error + StopResponse error + + ReadResourceCalled bool + ReadResourceResponse providers.ReadResourceResponse + ReadResourceRequest providers.ReadResourceRequest + ReadResourceFn 
func(providers.ReadResourceRequest) providers.ReadResourceResponse + + PlanResourceChangeCalled bool + PlanResourceChangeResponse providers.PlanResourceChangeResponse + PlanResourceChangeRequest providers.PlanResourceChangeRequest + PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse + + ApplyResourceChangeCalled bool + ApplyResourceChangeResponse providers.ApplyResourceChangeResponse + ApplyResourceChangeRequest providers.ApplyResourceChangeRequest + ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse + + ImportResourceStateCalled bool + ImportResourceStateResponse providers.ImportResourceStateResponse + ImportResourceStateRequest providers.ImportResourceStateRequest + ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse + // Legacy return type for existing tests, which will be shimmed into an + // ImportResourceStateResponse if set + ImportStateReturn []*InstanceState + + ReadDataSourceCalled bool + ReadDataSourceResponse providers.ReadDataSourceResponse + ReadDataSourceRequest providers.ReadDataSourceRequest + ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse + + CloseCalled bool + CloseError error + + // Legacy callbacks: if these are set, we will shim incoming calls for + // new-style methods to these old-fashioned terraform.ResourceProvider + // mock callbacks, for the benefit of older tests that were written against + // the old mock API. + ValidateFn func(c *ResourceConfig) (ws []string, es []error) + ConfigureFn func(c *ResourceConfig) error + DiffFn func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) + ApplyFn func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) +} + +func (p *MockProvider) GetSchema() providers.GetSchemaResponse { + p.Lock() + defer p.Unlock() + p.GetSchemaCalled = true + return p.getSchema() +} + +func (p *MockProvider) getSchema() providers.GetSchemaResponse { + // This version of getSchema doesn't do any locking, so it's suitable to + // call from other methods of this mock as long as they are already + // holding the lock. 
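+	//
+	// For example (hypothetical test setup, an editorial illustration): a
+	// test would usually seed GetSchemaReturn before exercising the mock,
+	//
+	//	p := &MockProvider{GetSchemaReturn: &ProviderSchema{
+	//		ResourceTypes: map[string]*configschema.Block{
+	//			"test_thing": {}, // "test_thing" is a made-up resource type
+	//		},
+	//	}}
+	//
+	// and the conversion below then wraps each block in a providers.Schema.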
+
+	ret := providers.GetSchemaResponse{
+		Provider:      providers.Schema{},
+		DataSources:   map[string]providers.Schema{},
+		ResourceTypes: map[string]providers.Schema{},
+	}
+	if p.GetSchemaReturn != nil {
+		ret.Provider.Block = p.GetSchemaReturn.Provider
+		for n, s := range p.GetSchemaReturn.DataSources {
+			ret.DataSources[n] = providers.Schema{
+				Block: s,
+			}
+		}
+		for n, s := range p.GetSchemaReturn.ResourceTypes {
+			ret.ResourceTypes[n] = providers.Schema{
+				Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]),
+				Block:   s,
+			}
+		}
+	}
+
+	return ret
+}
+
+func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.PrepareProviderConfigCalled = true
+	p.PrepareProviderConfigRequest = r
+	if p.PrepareProviderConfigFn != nil {
+		return p.PrepareProviderConfigFn(r)
+	}
+	return p.PrepareProviderConfigResponse
+}
+
+func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ValidateResourceTypeConfigCalled = true
+	p.ValidateResourceTypeConfigRequest = r
+
+	if p.ValidateFn != nil {
+		resp := p.getSchema()
+		schema := resp.Provider.Block
+		rc := NewResourceConfigShimmed(r.Config, schema)
+		warns, errs := p.ValidateFn(rc)
+		ret := providers.ValidateResourceTypeConfigResponse{}
+		for _, warn := range warns {
+			ret.Diagnostics = ret.Diagnostics.Append(tfdiags.SimpleWarning(warn))
+		}
+		for _, err := range errs {
+			ret.Diagnostics = ret.Diagnostics.Append(err)
+		}
+		// Return the diagnostics collected via the legacy ValidateFn shim;
+		// without this return the shimmed warnings and errors would be
+		// silently discarded.
+		return ret
+	}
+	if p.ValidateResourceTypeConfigFn != nil {
+		return p.ValidateResourceTypeConfigFn(r)
+	}
+
+	return p.ValidateResourceTypeConfigResponse
+}
+
+func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ValidateDataSourceConfigCalled = true
+	p.ValidateDataSourceConfigRequest = r
+
+	if p.ValidateDataSourceConfigFn != nil {
+		return p.ValidateDataSourceConfigFn(r)
+	}
+
+	return p.ValidateDataSourceConfigResponse
+}
+
+func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.UpgradeResourceStateCalled = true
+	p.UpgradeResourceStateRequest = r
+
+	if p.UpgradeResourceStateFn != nil {
+		return p.UpgradeResourceStateFn(r)
+	}
+
+	return p.UpgradeResourceStateResponse
+}
+
+func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ConfigureCalled = true
+	p.ConfigureRequest = r
+
+	if p.ConfigureFn != nil {
+		resp := p.getSchema()
+		schema := resp.Provider.Block
+		rc := NewResourceConfigShimmed(r.Config, schema)
+		ret := providers.ConfigureResponse{}
+
+		err := p.ConfigureFn(rc)
+		if err != nil {
+			ret.Diagnostics = ret.Diagnostics.Append(err)
+		}
+		return ret
+	}
+	if p.ConfigureNewFn != nil {
+		return p.ConfigureNewFn(r)
+	}
+
+	return p.ConfigureResponse
+}
+
+func (p *MockProvider) Stop() error {
+	// We intentionally don't lock in this one because the whole point of this
+	// method is to be called concurrently with another operation that can
+	// be cancelled. The provider itself is responsible for handling
+	// any concurrency concerns in this case.
+
+	p.StopCalled = true
+	if p.StopFn != nil {
+		return p.StopFn()
+	}
+
+	return p.StopResponse
+}
+
+func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ReadResourceCalled = true
+	p.ReadResourceRequest = r
+
+	if p.ReadResourceFn != nil {
+		return p.ReadResourceFn(r)
+	}
+
+	// make sure the NewState fits the schema
+	newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(p.ReadResourceResponse.NewState)
+	if err != nil {
+		panic(err)
+	}
+	resp := p.ReadResourceResponse
+	resp.NewState = newState
+
+	return resp
+}
+
+func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.PlanResourceChangeCalled = true
+	p.PlanResourceChangeRequest = r
+
+	if p.DiffFn != nil {
+		ps := p.getSchema()
+		if ps.ResourceTypes == nil || ps.ResourceTypes[r.TypeName].Block == nil {
+			return providers.PlanResourceChangeResponse{
+				Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("mock provider has no schema for resource type %s", r.TypeName)),
+			}
+		}
+		schema := ps.ResourceTypes[r.TypeName].Block
+		info := &InstanceInfo{
+			Type: r.TypeName,
+		}
+		priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0)
+		cfg := NewResourceConfigShimmed(r.Config, schema)
+
+		legacyDiff, err := p.DiffFn(info, priorState, cfg)
+
+		var res providers.PlanResourceChangeResponse
+		res.PlannedState = r.ProposedNewState
+		if err != nil {
+			res.Diagnostics = res.Diagnostics.Append(err)
+		}
+		if legacyDiff != nil {
+			newVal, err := legacyDiff.ApplyToValue(r.PriorState, schema)
+			if err != nil {
+				res.Diagnostics = res.Diagnostics.Append(err)
+			}
+
+			res.PlannedState = newVal
+
+			var requiresNew []string
+			for attr, d := range legacyDiff.Attributes {
+				if d.RequiresNew {
+					requiresNew = append(requiresNew, attr)
+				}
+			}
+			requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schema.ImpliedType())
+			if err != nil {
+				res.Diagnostics = res.Diagnostics.Append(err)
+			}
+			res.RequiresReplace = requiresReplace
+		}
+		return res
+	}
+	if p.PlanResourceChangeFn != nil {
+		return p.PlanResourceChangeFn(r)
+	}
+
+	return p.PlanResourceChangeResponse
+}
+
+func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
+	p.Lock()
+	p.ApplyResourceChangeCalled = true
+	p.ApplyResourceChangeRequest = r
+	p.Unlock()
+
+	if p.ApplyFn != nil {
+		// ApplyFn is a special callback fashioned after our old provider
+		// interface, which expected to be given an actual diff rather than
+		// separate old/new values to apply. Therefore we need to approximate
+		// a diff here well enough that _most_ of our legacy ApplyFns in old
+		// tests still see the behavior they are expecting. New tests should
+		// not use this, and should instead use ApplyResourceChangeFn directly.
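+		//
+		// For contrast, a new-style test would set the modern callback
+		// instead (a hypothetical example, not part of the upstream file):
+		//
+		//	p.ApplyResourceChangeFn = func(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
+		//		return providers.ApplyResourceChangeResponse{NewState: r.PlannedState}
+		//	}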
+ providerSchema := p.getSchema() + schema, ok := providerSchema.ResourceTypes[r.TypeName] + if !ok { + return providers.ApplyResourceChangeResponse{ + Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("no mocked schema available for resource type %s", r.TypeName)), + } + } + + info := &InstanceInfo{ + Type: r.TypeName, + } + + priorVal := r.PriorState + plannedVal := r.PlannedState + priorMap := hcl2shim.FlatmapValueFromHCL2(priorVal) + plannedMap := hcl2shim.FlatmapValueFromHCL2(plannedVal) + s := NewInstanceStateShimmedFromValue(priorVal, 0) + d := &InstanceDiff{ + Attributes: make(map[string]*ResourceAttrDiff), + } + if plannedMap == nil { // destroying, then + d.Destroy = true + // Destroy diffs don't have any attribute diffs + } else { + if priorMap == nil { // creating, then + // We'll just make an empty prior map to make things easier below. + priorMap = make(map[string]string) + } + + for k, new := range plannedMap { + old := priorMap[k] + newComputed := false + if new == config.UnknownVariableValue { + new = "" + newComputed = true + } + d.Attributes[k] = &ResourceAttrDiff{ + Old: old, + New: new, + NewComputed: newComputed, + Type: DiffAttrInput, // not generally used in tests, so just hard-coded + } + } + // Also need any attributes that were removed in "planned" + for k, old := range priorMap { + if _, ok := plannedMap[k]; ok { + continue + } + d.Attributes[k] = &ResourceAttrDiff{ + Old: old, + NewRemoved: true, + Type: DiffAttrInput, + } + } + } + newState, err := p.ApplyFn(info, s, d) + resp := providers.ApplyResourceChangeResponse{} + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + if newState != nil { + var newVal cty.Value + if newState != nil { + var err error + newVal, err = newState.AttrsAsObjectValue(schema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + } else { + // If apply returned a nil new state then that's the old way to + // indicate that the object was destroyed. Our new interface calls + // for that to be signalled as a null value. + newVal = cty.NullVal(schema.Block.ImpliedType()) + } + resp.NewState = newVal + } + + return resp + } + if p.ApplyResourceChangeFn != nil { + return p.ApplyResourceChangeFn(r) + } + + return p.ApplyResourceChangeResponse +} + +func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + p.Lock() + defer p.Unlock() + + if p.ImportStateReturn != nil { + for _, is := range p.ImportStateReturn { + if is.Attributes == nil { + is.Attributes = make(map[string]string) + } + is.Attributes["id"] = is.ID + + typeName := is.Ephemeral.Type + // Use the requested type if the resource has no type of it's own. + // We still return the empty type, which will error, but this prevents a panic. 
+ if typeName == "" { + typeName = r.TypeName + } + + schema := p.GetSchemaReturn.ResourceTypes[typeName] + if schema == nil { + panic("no schema found for " + typeName) + } + + private, err := json.Marshal(is.Meta) + if err != nil { + panic(err) + } + + state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) + if err != nil { + panic(err) + } + + state, err = schema.CoerceValue(state) + if err != nil { + panic(err) + } + + p.ImportResourceStateResponse.ImportedResources = append( + p.ImportResourceStateResponse.ImportedResources, + providers.ImportedResource{ + TypeName: is.Ephemeral.Type, + State: state, + Private: private, + }) + } + } + + p.ImportResourceStateCalled = true + p.ImportResourceStateRequest = r + if p.ImportResourceStateFn != nil { + return p.ImportResourceStateFn(r) + } + + return p.ImportResourceStateResponse +} + +func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadDataSourceCalled = true + p.ReadDataSourceRequest = r + + if p.ReadDataSourceFn != nil { + return p.ReadDataSourceFn(r) + } + + return p.ReadDataSourceResponse +} + +func (p *MockProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go new file mode 100644 index 0000000000..f595891644 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go @@ -0,0 +1,154 @@ +package terraform + +import ( + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform/provisioners" +) + +var _ provisioners.Interface = (*MockProvisioner)(nil) + +// MockProvisioner implements provisioners.Interface but mocks out all the +// calls for testing purposes. +type MockProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetSchemaCalled bool + GetSchemaResponse provisioners.GetSchemaResponse + + ValidateProvisionerConfigCalled bool + ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest + ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse + ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse + + ProvisionResourceCalled bool + ProvisionResourceRequest provisioners.ProvisionResourceRequest + ProvisionResourceResponse provisioners.ProvisionResourceResponse + ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse + + StopCalled bool + StopResponse error + StopFn func() error + + CloseCalled bool + CloseResponse error + CloseFn func() error + + // Legacy callbacks: if these are set, we will shim incoming calls for + // new-style methods to these old-fashioned terraform.ResourceProvider + // mock callbacks, for the benefit of older tests that were written against + // the old mock API. + ApplyFn func(rs *InstanceState, c *ResourceConfig) error +} + +func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + return p.getSchema() +} + +// getSchema is the implementation of GetSchema, which can be called from other +// methods on MockProvisioner that may already be holding the lock. 
+func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse {
+	return p.GetSchemaResponse
+}
+
+func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ValidateProvisionerConfigCalled = true
+	p.ValidateProvisionerConfigRequest = r
+	if p.ValidateProvisionerConfigFn != nil {
+		return p.ValidateProvisionerConfigFn(r)
+	}
+	return p.ValidateProvisionerConfigResponse
+}
+
+func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ProvisionResourceCalled = true
+	p.ProvisionResourceRequest = r
+	if p.ApplyFn != nil {
+		if !r.Config.IsKnown() {
+			panic(fmt.Sprintf("cannot provision with unknown value: %#v", r.Config))
+		}
+
+		schema := p.getSchema()
+		rc := NewResourceConfigShimmed(r.Config, schema.Provisioner)
+		connVal := r.Connection
+		connMap := map[string]string{}
+
+		if !connVal.IsNull() && connVal.IsKnown() {
+			for it := connVal.ElementIterator(); it.Next(); {
+				ak, av := it.Element()
+				name := ak.AsString()
+
+				if !av.IsKnown() || av.IsNull() {
+					continue
+				}
+
+				av, _ = convert.Convert(av, cty.String)
+				connMap[name] = av.AsString()
+			}
+		}
+
+		// We no longer pass the full instance state to a provisioner, so we'll
+		// construct a partial one that should be good enough for what existing
+		// test mocks need.
+		is := &InstanceState{
+			Ephemeral: EphemeralState{
+				ConnInfo: connMap,
+			},
+		}
+		var resp provisioners.ProvisionResourceResponse
+		err := p.ApplyFn(is, rc)
+		if err != nil {
+			resp.Diagnostics = resp.Diagnostics.Append(err)
+		}
+		return resp
+	}
+	if p.ProvisionResourceFn != nil {
+		fn := p.ProvisionResourceFn
+		// Unlock while calling the callback so that it can safely call back
+		// into the mock, then re-lock before returning so that the deferred
+		// Unlock above stays balanced.
+		p.Unlock()
+		resp := fn(r)
+		p.Lock()
+		return resp
+	}
+
+	return p.ProvisionResourceResponse
+}
+
+func (p *MockProvisioner) Stop() error {
+	// We intentionally don't lock in this one because the whole point of this
+	// method is to be called concurrently with another operation that can
+	// be cancelled. The provisioner itself is responsible for handling
+	// any concurrency concerns in this case.
+
+	p.StopCalled = true
+	if p.StopFn != nil {
+		return p.StopFn()
+	}
+
+	return p.StopResponse
+}
+
+func (p *MockProvisioner) Close() error {
+	p.Lock()
+	defer p.Unlock()
+
+	p.CloseCalled = true
+	if p.CloseFn != nil {
+		return p.CloseFn()
+	}
+
+	return p.CloseResponse
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
index 2f5ebb5e72..994009ac75 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -2,14 +2,20 @@ package terraform

 import (
 	"fmt"
+	"log"
 	"reflect"
 	"sort"
 	"strconv"
 	"strings"

-	"github.com/hashicorp/terraform/config"
 	"github.com/mitchellh/copystructure"
 	"github.com/mitchellh/reflectwalk"
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/config/hcl2shim"
+	"github.com/hashicorp/terraform/configs/configschema"
 )

 // ResourceProvisionerConfig is used to pair a provisioner
@@ -25,9 +31,10 @@ type ResourceProvisionerConfig struct {
 	ConnInfo    *config.RawConfig
 }

-// Resource encapsulates a resource, its configuration, its provider,
-// its current state, and potentially a desired diff from the state it
-// wants to reach.
+// Resource is a legacy way to identify a particular resource instance. +// +// New code should use addrs.ResourceInstance instead. This is still here +// only for codepaths that haven't been updated yet. type Resource struct { // These are all used by the new EvalNode stuff. Name string @@ -47,6 +54,31 @@ type Resource struct { Flags ResourceFlag } +// NewResource constructs a legacy Resource object from an +// addrs.ResourceInstance value. +// +// This is provided to shim to old codepaths that haven't been updated away +// from this type yet. Since this old type is not able to represent instances +// that have string keys, this function will panic if given a resource address +// that has a string key. +func NewResource(addr addrs.ResourceInstance) *Resource { + ret := &Resource{ + Name: addr.Resource.Name, + Type: addr.Resource.Type, + } + + if addr.Key != addrs.NoKey { + switch tk := addr.Key.(type) { + case addrs.IntKey: + ret.CountIndex = int(tk) + default: + panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key)) + } + } + + return ret +} + // ResourceKind specifies what kind of instance we're working with, whether // its a primary instance, a tainted instance, or an orphan. type ResourceFlag byte @@ -72,20 +104,53 @@ type InstanceInfo struct { uniqueExtra string } -// HumanId is a unique Id that is human-friendly and useful for UI elements. -func (i *InstanceInfo) HumanId() string { - if i == nil { - return "" +// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance. +// +// InstanceInfo is a legacy type, and uses of it should be gradually replaced +// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as +// appropriate. +// +// The legacy InstanceInfo type cannot represent module instances with instance +// keys, so this function will panic if given such a path. Uses of this type +// should all be removed or replaced before implementing "count" and "for_each" +// arguments on modules in order to avoid such panics. +// +// This legacy type also cannot represent resource instances with string +// instance keys. It will panic if the given key is not either NoKey or an +// IntKey. +func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { + // We need an old-style []string module path for InstanceInfo. + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + panic("NewInstanceInfo cannot convert module instance with key") + } + path[i] = step.Name } - if len(i.ModulePath) <= 1 { - return i.Id + // This is a funny old meaning of "id" that is no longer current. It should + // not be used for anything users might see. Note that it does not include + // a representation of the resource mode, and so it's impossible to + // determine from an InstanceInfo alone whether it is a managed or data + // resource that is being referred to. + id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) + if addr.Resource.Resource.Mode == addrs.DataResourceMode { + id = "data." 
+ id + } + if addr.Resource.Key != addrs.NoKey { + switch k := addr.Resource.Key.(type) { + case addrs.IntKey: + id = id + fmt.Sprintf(".%d", int(k)) + default: + panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) + } } - return fmt.Sprintf( - "module.%s.%s", - strings.Join(i.ModulePath[1:], "."), - i.Id) + return &InstanceInfo{ + Id: id, + ModulePath: path, + Type: addr.Resource.Resource.Type, + } } // ResourceAddress returns the address of the resource that the receiver is describing. @@ -128,18 +193,9 @@ func (i *InstanceInfo) ResourceAddress() *ResourceAddress { return addr } -func (i *InstanceInfo) uniqueId() string { - prefix := i.HumanId() - if v := i.uniqueExtra; v != "" { - prefix += " " + v - } - - return prefix -} - -// ResourceConfig holds the configuration given for a resource. This is -// done instead of a raw `map[string]interface{}` type so that rich -// methods can be added to it to make dealing with it easier. +// ResourceConfig is a legacy type that was formerly used to represent +// interpolatable configuration blocks. It is now only used to shim to old +// APIs that still use this type, via NewResourceConfigShimmed. type ResourceConfig struct { ComputedKeys []string Raw map[string]interface{} @@ -155,6 +211,112 @@ func NewResourceConfig(c *config.RawConfig) *ResourceConfig { return result } +// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy +// ResourceConfig object, so that it can be passed to older APIs that expect +// this wrapping. +// +// The returned ResourceConfig is already interpolated and cannot be +// re-interpolated. It is, therefore, useful only to functions that expect +// an already-populated ResourceConfig which they then treat as read-only. +// +// If the given value is not of an object type that conforms to the given +// schema then this function will panic. +func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { + if !val.Type().IsObjectType() { + panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) + } + ret := &ResourceConfig{} + + legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) + if legacyVal != nil { + ret.Config = legacyVal + + // Now we need to walk through our structure and find any unknown values, + // producing the separate list ComputedKeys to represent these. We use the + // schema here so that we can preserve the expected invariant + // that an attribute is always either wholly known or wholly unknown, while + // a child block can be partially unknown. + ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, schema, "") + } else { + ret.Config = make(map[string]interface{}) + } + ret.Raw = ret.Config + + return ret +} + +// newResourceConfigShimmedComputedKeys finds all of the unknown values in the +// given object, which must conform to the given schema, returning them in +// the format that's expected for ResourceConfig.ComputedKeys. 
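+//
+// For example (an illustrative sketch based on the traversal below): given a
+// resource whose top-level "ami" attribute is unknown and whose first "ebs"
+// list block contains an unknown "size" attribute, the result would be
+//
+//	[]string{"ami", "ebs.0.size"}
+//
+// mirroring the flatmap-style addresses used by the legacy ResourceConfig
+// API. The attribute and block names here are made up for illustration.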
+func newResourceConfigShimmedComputedKeys(obj cty.Value, schema *configschema.Block, prefix string) []string { + var ret []string + ty := obj.Type() + + if schema == nil { + log.Printf("[WARN] NewResourceConfigShimmed: can't identify computed keys because no schema is available") + return nil + } + + for attrName := range schema.Attributes { + if !ty.HasAttribute(attrName) { + // Should never happen, but we'll tolerate it anyway + continue + } + + attrVal := obj.GetAttr(attrName) + if !attrVal.IsWhollyKnown() { + ret = append(ret, prefix+attrName) + } + } + + for typeName, blockS := range schema.BlockTypes { + if !ty.HasAttribute(typeName) { + // Should never happen, but we'll tolerate it anyway + continue + } + + blockVal := obj.GetAttr(typeName) + if blockVal.IsNull() || !blockVal.IsKnown() { + continue + } + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + keys := newResourceConfigShimmedComputedKeys(blockVal, &blockS.Block, fmt.Sprintf("%s%s.", prefix, typeName)) + ret = append(ret, keys...) + case configschema.NestingList, configschema.NestingSet: + // Producing computed keys items for sets is not really useful + // since they are not usefully addressable anyway, but we'll treat + // them like lists just so that ret.ComputedKeys accounts for them + // all. Our legacy system didn't support sets here anyway, so + // treating them as lists is the most accurate translation. Although + // set traversal isn't in any particular order, it is _stable_ as + // long as the list isn't mutated, and so we know we'll see the + // same order here as hcl2shim.ConfigValueFromHCL2 would've seen + // inside NewResourceConfigShimmed above. + i := 0 + for it := blockVal.ElementIterator(); it.Next(); i++ { + _, subVal := it.Element() + subPrefix := fmt.Sprintf("%s.%s%d.", typeName, prefix, i) + keys := newResourceConfigShimmedComputedKeys(subVal, &blockS.Block, subPrefix) + ret = append(ret, keys...) + } + case configschema.NestingMap: + for it := blockVal.ElementIterator(); it.Next(); { + subK, subVal := it.Element() + subPrefix := fmt.Sprintf("%s.%s%s.", typeName, prefix, subK.AsString()) + keys := newResourceConfigShimmedComputedKeys(subVal, &blockS.Block, subPrefix) + ret = append(ret, keys...) + } + default: + // Should never happen, since the above is exhaustive. + panic(fmt.Errorf("unsupported block nesting type %s", blockS.Nesting)) + } + } + + return ret +} + // DeepCopy performs a deep copy of the configuration. This makes it safe // to modify any of the structures that are part of the resource config without // affecting the original configuration. @@ -374,6 +536,14 @@ func (c *ResourceConfig) get( // refactor is complete. func (c *ResourceConfig) interpolateForce() { if c.raw == nil { + // If we don't have a lowercase "raw" but we _do_ have the uppercase + // Raw populated then this indicates that we're recieving a shim + // ResourceConfig created by NewResourceConfigShimmed, which is already + // fully evaluated and thus this function doesn't need to do anything. 
+ if c.Raw != nil { + return + } + var err error c.raw, err = config.NewRawConfig(make(map[string]interface{})) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go index a64f5d846c..156ecf5c0e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go @@ -7,8 +7,10 @@ import ( "strconv" "strings" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/configs" ) // ResourceAddress is a way of identifying an individual resource (or, @@ -109,30 +111,47 @@ func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { } } -// MatchesConfig returns true if the receiver matches the given -// configuration resource within the given configuration module. +// MatchesResourceConfig returns true if the receiver matches the given +// configuration resource within the given _static_ module path. Note that +// the module path in a resource address is a _dynamic_ module path, and +// multiple dynamic resource paths may map to a single static path if +// count and for_each are in use on module calls. // // Since resource configuration blocks represent all of the instances of // a multi-instance resource, the index of the address (if any) is not // considered. -func (r *ResourceAddress) MatchesConfig(mod *module.Tree, rc *config.Resource) bool { +func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool { if r.HasResourceSpec() { - if r.Mode != rc.Mode || r.Type != rc.Type || r.Name != rc.Name { + // FIXME: Some ugliness while we are between worlds. Functionality + // in "addrs" should eventually replace this ResourceAddress idea + // completely, but for now we'll need to translate to the old + // way of representing resource modes. + switch r.Mode { + case config.ManagedResourceMode: + if rc.Mode != addrs.ManagedResourceMode { + return false + } + case config.DataResourceMode: + if rc.Mode != addrs.DataResourceMode { + return false + } + } + if r.Type != rc.Type || r.Name != rc.Name { return false } } addrPath := r.Path - cfgPath := mod.Path() // normalize if len(addrPath) == 0 { addrPath = nil } - if len(cfgPath) == 0 { - cfgPath = nil + if len(path) == 0 { + path = nil } - return reflect.DeepEqual(addrPath, cfgPath) + rawPath := []string(path) + return reflect.DeepEqual(addrPath, rawPath) } // stateId returns the ID that this resource should be entered with @@ -270,6 +289,144 @@ func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAd return addr, nil } +// NewLegacyResourceAddress creates a ResourceAddress from a new-style +// addrs.AbsResource value. +// +// This is provided for shimming purposes so that we can still easily call into +// older functions that expect the ResourceAddress type. 
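+//
+// For example (illustrative): converting the absolute resource address
+// module.child.aws_instance.foo would yield a ResourceAddress with
+//
+//	Mode:  config.ManagedResourceMode
+//	Path:  []string{"child"}
+//	Type:  "aws_instance"
+//	Name:  "foo"
+//	Index: -1
+//
+// since a whole-resource (non-instance) address carries no index.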
+func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress { + ret := &ResourceAddress{ + Type: addr.Resource.Type, + Name: addr.Resource.Name, + } + + switch addr.Resource.Mode { + case addrs.ManagedResourceMode: + ret.Mode = config.ManagedResourceMode + case addrs.DataResourceMode: + ret.Mode = config.DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to legacy config.ResourceMode value", addr.Resource.Mode)) + } + + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + // At the time of writing this can't happen because we don't + // ket generate keyed module instances. This legacy codepath must + // be removed before we can support "count" and "for_each" for + // modules. + panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) + } + + path[i] = step.Name + } + ret.Path = path + ret.Index = -1 + + return ret +} + +// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style +// addrs.AbsResource value. +// +// This is provided for shimming purposes so that we can still easily call into +// older functions that expect the ResourceAddress type. +func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress { + ret := &ResourceAddress{ + Type: addr.Resource.Resource.Type, + Name: addr.Resource.Resource.Name, + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + ret.Mode = config.ManagedResourceMode + case addrs.DataResourceMode: + ret.Mode = config.DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to legacy config.ResourceMode value", addr.Resource.Resource.Mode)) + } + + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + // At the time of writing this can't happen because we don't + // ket generate keyed module instances. This legacy codepath must + // be removed before we can support "count" and "for_each" for + // modules. + panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) + } + + path[i] = step.Name + } + ret.Path = path + + if addr.Resource.Key == addrs.NoKey { + ret.Index = -1 + } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok { + ret.Index = int(ik) + } else { + panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key)) + } + + return ret +} + +// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to +// the new resource address type addrs.AbsResourceInstance. +// +// This method can be used only on an address that has a resource specification. +// It will panic if called on a module-path-only ResourceAddress. Use +// method HasResourceSpec to check before calling, in contexts where it is +// unclear. +// +// addrs.AbsResourceInstance does not represent the "tainted" and "deposed" +// states, and so if these are present on the receiver then they are discarded. +// +// This is provided for shimming purposes so that we can easily adapt functions +// that are returning the legacy ResourceAddress type, for situations where +// the new type is required. 
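+//
+// For example (illustrative): the legacy address aws_instance.foo[2] in the
+// root module converts to an addrs.AbsResourceInstance whose Resource.Key is
+// addrs.IntKey(2), while an Index of -1 maps to addrs.NoKey.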
+func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance { + if !addr.HasResourceSpec() { + panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec") + } + + ret := addrs.AbsResourceInstance{ + Module: addr.ModuleInstanceAddr(), + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Type: addr.Type, + Name: addr.Name, + }, + }, + } + + switch addr.Mode { + case config.ManagedResourceMode: + ret.Resource.Resource.Mode = addrs.ManagedResourceMode + case config.DataResourceMode: + ret.Resource.Resource.Mode = addrs.DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode)) + } + + if addr.Index != -1 { + ret.Resource.Key = addrs.IntKey(addr.Index) + } + + return ret +} + +// ModuleInstanceAddr returns the module path portion of the receiver as a +// addrs.ModuleInstance value. +func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance { + path := make(addrs.ModuleInstance, len(addr.Path)) + for i, name := range addr.Path { + path[i] = addrs.ModuleInstanceStep{Name: name} + } + return path +} + // Contains returns true if and only if the given node is contained within // the receiver. // diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go index 93fd14fc80..3455ad88c5 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go @@ -3,8 +3,10 @@ package terraform import ( "fmt" - multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/providers" ) // ResourceProvider is an interface that must be implemented by any @@ -30,13 +32,12 @@ type ResourceProvider interface { // resource or data source has the SchemaAvailable flag set. GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) - // Input is called to ask the provider to ask the user for input - // for completing the configuration if necesarry. + // Input was used prior to v0.12 to ask the provider to prompt the user + // for input to complete the configuration. // - // This may or may not be called, so resource provider writers shouldn't - // rely on this being available to set some default values for validate - // later. Example of a situation where this wouldn't be called is if - // the user is not using a TTY. + // From v0.12 onwards this method is never called because Terraform Core + // is able to handle the necessary input logic itself based on the + // schema returned from GetSchema. Input(UIInput, *ResourceConfig) (*ResourceConfig, error) // Validate is called once at the beginning with the raw configuration @@ -170,18 +171,6 @@ type ResourceProvider interface { ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) } -// ResourceProviderError may be returned when creating a Context if the -// required providers cannot be satisfied. This error can then be used to -// format a more useful message for the user. -type ResourceProviderError struct { - Errors []error -} - -func (e *ResourceProviderError) Error() string { - // use multierror to format the default output - return multierror.Append(nil, e.Errors...).Error() -} - // ResourceProviderCloser is an interface that providers that can close // connections that aren't needed anymore must implement. 
type ResourceProviderCloser interface { @@ -296,13 +285,35 @@ func ProviderHasDataSource(p ResourceProvider, n string) bool { // This should be called only with configurations that have passed calls // to config.Validate(), which ensures that all of the given version // constraints are valid. It will panic if any invalid constraints are present. -func resourceProviderFactories(resolver ResourceProviderResolver, reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, error) { +func resourceProviderFactories(resolver providers.Resolver, reqd discovery.PluginRequirements) (map[string]providers.Factory, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics ret, errs := resolver.ResolveProviders(reqd) if errs != nil { - return nil, &ResourceProviderError{ - Errors: errs, + diags = diags.Append( + tfdiags.Sourceless(tfdiags.Error, + "Could not satisfy plugin requirements", + errPluginInit, + ), + ) + + for _, err := range errs { + diags = diags.Append(err) } + + return nil, diags } return ret, nil } + +const errPluginInit = ` +Plugin reinitialization required. Please run "terraform init". + +Plugins are external binaries that Terraform uses to access and manipulate +resources. The configuration provided requires plugins which can't be located, +don't satisfy the version constraints, or are otherwise incompatible. + +Terraform automatically discovers provider requirements from your +configuration, including providers used in child modules. To see the +requirements and constraints from each module, run "terraform providers". +` diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go index 361ec1ec09..2743dd7e9e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go @@ -1,9 +1,21 @@ package terraform +import ( + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" +) + // ResourceProvisioner is an interface that must be implemented by any // resource provisioner: the thing that initializes resources in // a Terraform configuration. type ResourceProvisioner interface { + // GetConfigSchema returns the schema for the provisioner type's main + // configuration block. This is called prior to Validate to enable some + // basic structural validation to be performed automatically and to allow + // the configuration to be properly extracted from potentially-ambiguous + // configuration file formats. + GetConfigSchema() (*configschema.Block, error) + // Validate is called once at the beginning with the raw // configuration (no interpolation done) and can return a list of warnings // and/or errors. @@ -52,3 +64,7 @@ type ResourceProvisionerCloser interface { // ResourceProvisionerFactory is a function type that creates a new instance // of a resource provisioner. type ResourceProvisionerFactory func() (ResourceProvisioner, error) + +// ProvisionerFactory is a function type that creates a new instance +// of a provisioners.Interface. 
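+//
+// For example (hypothetical, assuming provisioners.Factory has the signature
+// func() (provisioners.Interface, error)): a test could register a mock as
+//
+//	var factory ProvisionerFactory = func() (provisioners.Interface, error) {
+//		return &MockProvisioner{}, nil
+//	}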
+type ProvisionerFactory = provisioners.Factory diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go index f471a5182b..7b88cf7333 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go @@ -1,6 +1,10 @@ package terraform -import "sync" +import ( + "sync" + + "github.com/hashicorp/terraform/configs/configschema" +) // MockResourceProvisioner implements ResourceProvisioner but mocks out all the // calls for testing purposes. @@ -9,6 +13,10 @@ type MockResourceProvisioner struct { // Anything you want, in case you need to store extra data with the mock. Meta interface{} + GetConfigSchemaCalled bool + GetConfigSchemaReturnSchema *configschema.Block + GetConfigSchemaReturnError error + ApplyCalled bool ApplyOutput UIOutput ApplyState *InstanceState @@ -27,6 +35,13 @@ type MockResourceProvisioner struct { StopReturnError error } +var _ ResourceProvisioner = (*MockResourceProvisioner)(nil) + +func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { + p.GetConfigSchemaCalled = true + return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError +} + func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { p.Lock() defer p.Unlock() diff --git a/vendor/github.com/hashicorp/terraform/terraform/schemas.go b/vendor/github.com/hashicorp/terraform/terraform/schemas.go index ec46efcf7d..62991c82d0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/schemas.go +++ b/vendor/github.com/hashicorp/terraform/terraform/schemas.go @@ -1,18 +1,239 @@ package terraform import ( - "github.com/hashicorp/terraform/config/configschema" + "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) +// Schemas is a container for various kinds of schema that Terraform needs +// during processing. type Schemas struct { - Providers ProviderSchemas + Providers map[string]*ProviderSchema + Provisioners map[string]*configschema.Block +} + +// ProviderSchema returns the entire ProviderSchema object that was produced +// by the plugin for the given provider, or nil if no such schema is available. +// +// It's usually better to go use the more precise methods offered by type +// Schemas to handle this detail automatically. +func (ss *Schemas) ProviderSchema(typeName string) *ProviderSchema { + if ss.Providers == nil { + return nil + } + return ss.Providers[typeName] +} + +// ProviderConfig returns the schema for the provider configuration of the +// given provider type, or nil if no such schema is available. +func (ss *Schemas) ProviderConfig(typeName string) *configschema.Block { + ps := ss.ProviderSchema(typeName) + if ps == nil { + return nil + } + return ps.Provider +} + +// ResourceTypeConfig returns the schema for the configuration of a given +// resource type belonging to a given provider type, or nil of no such +// schema is available. +// +// In many cases the provider type is inferrable from the resource type name, +// but this is not always true because users can override the provider for +// a resource using the "provider" meta-argument. 
Therefore it's important to +// always pass the correct provider name, even though it many cases it feels +// redundant. +func (ss *Schemas) ResourceTypeConfig(providerType string, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { + ps := ss.ProviderSchema(providerType) + if ps == nil || ps.ResourceTypes == nil { + return nil, 0 + } + return ps.SchemaForResourceType(resourceMode, resourceType) +} + +// ProvisionerConfig returns the schema for the configuration of a given +// provisioner, or nil of no such schema is available. +func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { + return ss.Provisioners[name] } -// ProviderSchemas is a map from provider names to provider schemas. +// LoadSchemas searches the given configuration, state and plan (any of which +// may be nil) for constructs that have an associated schema, requests the +// necessary schemas from the given component factory (which must _not_ be nil), +// and returns a single object representing all of the necessary schemas. // -// The names in this map are the direct plugin name (e.g. "aws") rather than -// any alias name (e.g. "aws.foo"), since. -type ProviderSchemas map[string]*ProviderSchema +// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing +// errors across multiple separate objects. Errors here will usually indicate +// either misbehavior on the part of one of the providers or of the provider +// protocol itself. When returned with errors, the returned schemas object is +// still valid but may be incomplete. +func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { + schemas := &Schemas{ + Providers: map[string]*ProviderSchema{}, + Provisioners: map[string]*configschema.Block{}, + } + var diags tfdiags.Diagnostics + + newDiags := loadProviderSchemas(schemas.Providers, config, state, components) + diags = diags.Append(newDiags) + newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) + diags = diags.Append(newDiags) + + return schemas, diags.Err() +} + +func loadProviderSchemas(schemas map[string]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(typeName string) { + if _, exists := schemas[typeName]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", typeName) + provider, err := components.ResourceProvider(typeName, "early/"+typeName) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[typeName] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", typeName, err), + ) + return + } + defer func() { + provider.Close() + }() + + resp := provider.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. 
+ schemas[typeName] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provider %q: %s", typeName, resp.Diagnostics.Err()), + ) + return + } + + s := &ProviderSchema{ + Provider: resp.Provider.Block, + ResourceTypes: make(map[string]*configschema.Block), + DataSources: make(map[string]*configschema.Block), + + ResourceTypeSchemaVersions: make(map[string]uint64), + } + + if resp.Provider.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. + diags = diags.Append( + fmt.Errorf("invalid negative schema version provider configuration for provider %q", typeName), + ) + } + + for t, r := range resp.ResourceTypes { + s.ResourceTypes[t] = r.Block + s.ResourceTypeSchemaVersions[t] = uint64(r.Version) + if r.Version < 0 { + diags = diags.Append( + fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, typeName), + ) + } + } + + for t, d := range resp.DataSources { + s.DataSources[t] = d.Block + if d.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. + diags = diags.Append( + fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, typeName), + ) + } + } + + schemas[typeName] = s + } + + if config != nil { + for _, typeName := range config.ProviderTypes() { + ensure(typeName) + } + } + + if state != nil { + needed := providers.AddressedTypesAbs(state.ProviderAddrs()) + for _, typeName := range needed { + ensure(typeName) + } + } + + return diags +} + +func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(name string) { + if _, exists := schemas[name]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) + provisioner, err := components.ResourceProvisioner(name, "early/"+name) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), + ) + return + } + defer func() { + if closer, ok := provisioner.(ResourceProvisionerCloser); ok { + closer.Close() + } + }() + + resp := provisioner.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), + ) + return + } + + schemas[name] = resp.Provisioner + } + + if config != nil { + for _, rc := range config.Module.ManagedResources { + for _, pc := range rc.Managed.Provisioners { + ensure(pc.Type) + } + } + + // Must also visit our child modules, recursively. + for _, cc := range config.Children { + childDiags := loadProvisionerSchemas(schemas, cc, components) + diags = diags.Append(childDiags) + } + } + + return diags +} // ProviderSchema represents the schema for a provider's own configuration // and the configuration for some or all of its resources and data sources. 
@@ -24,6 +245,29 @@ type ProviderSchema struct { Provider *configschema.Block ResourceTypes map[string]*configschema.Block DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// SchemaForResourceType attempts to find a schema for the given mode and type. +// Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { + switch mode { + case addrs.ManagedResourceMode: + return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] + case addrs.DataResourceMode: + // Data resources don't have schema versions right now, since state is discarded for each refresh + return ps.DataSources[typeName], 0 + default: + // Shouldn't happen, because the above cases are comprehensive. + return nil, 0 + } +} + +// SchemaForResourceAddr attempts to find a schema for the mode and type from +// the given resource address. Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { + return ps.SchemaForResourceType(addr.Mode, addr.Type) } // ProviderSchemaRequest is used to describe to a ResourceProvider which diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go deleted file mode 100644 index 20f1d8a274..0000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/semantics.go +++ /dev/null @@ -1,132 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/dag" -) - -// GraphSemanticChecker is the interface that semantic checks across -// the entire Terraform graph implement. -// -// The graph should NOT be modified by the semantic checker. -type GraphSemanticChecker interface { - Check(*dag.Graph) error -} - -// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker -// that runs a list of SemanticCheckers against the vertices of the graph -// in no specified order. -type UnorderedSemanticCheckRunner struct { - Checks []SemanticChecker -} - -func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error { - var err error - for _, v := range g.Vertices() { - for _, check := range sc.Checks { - if e := check.Check(g, v); e != nil { - err = multierror.Append(err, e) - } - } - } - - return err -} - -// SemanticChecker is the interface that semantic checks across the -// Terraform graph implement. Errors are accumulated. Even after an error -// is returned, child vertices in the graph will still be visited. -// -// The graph should NOT be modified by the semantic checker. -// -// The order in which vertices are visited is left unspecified, so the -// semantic checks should not rely on that. -type SemanticChecker interface { - Check(*dag.Graph, dag.Vertex) error -} - -// smcUserVariables does all the semantic checks to verify that the -// variables given satisfy the configuration itself. 
-func smcUserVariables(c *config.Config, vs map[string]interface{}) []error { - var errs []error - - cvs := make(map[string]*config.Variable) - for _, v := range c.Variables { - cvs[v.Name] = v - } - - // Check that all required variables are present - required := make(map[string]struct{}) - for _, v := range c.Variables { - if v.Required() { - required[v.Name] = struct{}{} - } - } - for k, _ := range vs { - delete(required, k) - } - if len(required) > 0 { - for k, _ := range required { - errs = append(errs, fmt.Errorf( - "Required variable not set: %s", k)) - } - } - - // Check that types match up - for name, proposedValue := range vs { - // Check for "map.key" fields. These stopped working with Terraform - // 0.7 but we do this to surface a better error message informing - // the user what happened. - if idx := strings.Index(name, "."); idx > 0 { - key := name[:idx] - if _, ok := cvs[key]; ok { - errs = append(errs, fmt.Errorf( - "%s: Overriding map keys with the format `name.key` is no "+ - "longer allowed. You may still override keys by setting "+ - "`name = { key = value }`. The maps will be merged. This "+ - "behavior appeared in 0.7.0.", name)) - continue - } - } - - schema, ok := cvs[name] - if !ok { - continue - } - - declaredType := schema.Type() - - switch declaredType { - case config.VariableTypeString: - switch proposedValue.(type) { - case string: - continue - } - case config.VariableTypeMap: - switch v := proposedValue.(type) { - case map[string]interface{}: - continue - case []map[string]interface{}: - // if we have a list of 1 map, it will get coerced later as needed - if len(v) == 1 { - continue - } - } - case config.VariableTypeList: - switch proposedValue.(type) { - case []interface{}: - continue - } - } - errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", - name, declaredType.Printable(), hclTypeName(proposedValue))) - } - - // TODO(mitchellh): variables that are unknown - - return errs -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go index 04b14a6597..092b690799 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/state.go @@ -16,12 +16,23 @@ import ( "strings" "sync" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" "github.com/mitchellh/copystructure" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/tfdiags" tfversion "github.com/hashicorp/terraform/version" ) @@ -33,26 +44,38 @@ const ( // rootModulePath is the path of the root module var rootModulePath = []string{"root"} +// normalizeModulePath transforms a legacy module path (which may or may not +// have a redundant "root" label at the start of it) into an +// addrs.ModuleInstance representing the same module. 
+// +// For legacy reasons, different parts of Terraform disagree about whether the +// root module has the path []string{} or []string{"root"}, and so this +// function accepts both and trims off the "root". An implication of this is +// that it's not possible to actually have a module call in the root module +// that is itself named "root", since that would be ambiguous. +// // normalizeModulePath takes a raw module path and returns a path that // has the rootModulePath prepended to it. If I could go back in time I // would've never had a rootModulePath (empty path would be root). We can // still fix this but that's a big refactor that my branch doesn't make sense // for. Instead, this function normalizes paths. -func normalizeModulePath(p []string) []string { - k := len(rootModulePath) +func normalizeModulePath(p []string) addrs.ModuleInstance { + // FIXME: Remove this once everyone is using addrs.ModuleInstance. - // If we already have a root module prefix, we're done - if len(p) >= len(rootModulePath) { - if reflect.DeepEqual(p[:k], rootModulePath) { - return p - } + if len(p) > 0 && p[0] == "root" { + p = p[1:] } - // None? Prefix it - result := make([]string, len(rootModulePath)+len(p)) - copy(result, rootModulePath) - copy(result[k:], p) - return result + ret := make(addrs.ModuleInstance, len(p)) + for i, name := range p { + // For now we don't actually support modules with multiple instances + // identified by keys, so we just treat every path element as a + // step with no key. + ret[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + return ret } // State keeps track of a snapshot state-of-the-world that Terraform @@ -138,21 +161,43 @@ func (s *State) children(path []string) []*ModuleState { // // This should be the preferred method to add module states since it // allows us to optimize lookups later as well as control sorting. -func (s *State) AddModule(path []string) *ModuleState { +func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { s.Lock() defer s.Unlock() return s.addModule(path) } -func (s *State) addModule(path []string) *ModuleState { +func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { // check if the module exists first m := s.moduleByPath(path) if m != nil { return m } - m = &ModuleState{Path: path} + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + // For the purposes of state, the legacy address format also includes + // a redundant extra prefix element "root". It is important to include + // this because the "prune" method will remove any module that has a + // path length less than one, and other parts of the state code will + // trim off the first element indiscriminately. + legacyPath := make([]string, len(path)+1) + legacyPath[0] = "root" + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState.
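// (Illustrative aside, not part of the original patch: a module instance
// address such as module.network.module.subnets lowers here to the legacy
// path []string{"root", "network", "subnets"}; an address carrying an
// instance key, e.g. module.network[0], has no legacy equivalent, which is
// why the key check below panics.)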
+ panic("state cannot represent modules with count or for_each keys") + } + + legacyPath[i+1] = step.Name + } + + m = &ModuleState{Path: legacyPath} m.init() s.Modules = append(s.Modules, m) s.sort() @@ -162,7 +207,7 @@ func (s *State) addModule(path []string) *ModuleState { // ModuleByPath is used to lookup the module state for the given path. // This should be the preferred lookup mechanism as it allows for future // lookup optimizations. -func (s *State) ModuleByPath(path []string) *ModuleState { +func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { if s == nil { return nil } @@ -172,7 +217,7 @@ func (s *State) ModuleByPath(path []string) *ModuleState { return s.moduleByPath(path) } -func (s *State) moduleByPath(path []string) *ModuleState { +func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { for _, mod := range s.Modules { if mod == nil { continue @@ -180,97 +225,14 @@ func (s *State) moduleByPath(path []string) *ModuleState { if mod.Path == nil { panic("missing module path") } - if reflect.DeepEqual(mod.Path, path) { + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { return mod } } return nil } -// ModuleOrphans returns all the module orphans in this state by -// returning their full paths. These paths can be used with ModuleByPath -// to return the actual state. -func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string { - s.Lock() - defer s.Unlock() - - return s.moduleOrphans(path, c) - -} - -func (s *State) moduleOrphans(path []string, c *config.Config) [][]string { - // direct keeps track of what direct children we have both in our config - // and in our state. childrenKeys keeps track of what isn't an orphan. - direct := make(map[string]struct{}) - childrenKeys := make(map[string]struct{}) - if c != nil { - for _, m := range c.Modules { - childrenKeys[m.Name] = struct{}{} - direct[m.Name] = struct{}{} - } - } - - // Go over the direct children and find any that aren't in our keys. - var orphans [][]string - for _, m := range s.children(path) { - key := m.Path[len(m.Path)-1] - - // Record that we found this key as a direct child. We use this - // later to find orphan nested modules. - direct[key] = struct{}{} - - // If we have a direct child still in our config, it is not an orphan - if _, ok := childrenKeys[key]; ok { - continue - } - - orphans = append(orphans, m.Path) - } - - // Find the orphans that are nested... - for _, m := range s.Modules { - if m == nil { - continue - } - - // We only want modules that are at least grandchildren - if len(m.Path) < len(path)+2 { - continue - } - - // If it isn't part of our tree, continue - if !reflect.DeepEqual(path, m.Path[:len(path)]) { - continue - } - - // If we have the direct child, then just skip it. - key := m.Path[len(path)] - if _, ok := direct[key]; ok { - continue - } - - orphanPath := m.Path[:len(path)+1] - - // Don't double-add if we've already added this orphan (which can happen if - // there are multiple nested sub-modules that get orphaned together). - alreadyAdded := false - for _, o := range orphans { - if reflect.DeepEqual(o, orphanPath) { - alreadyAdded = true - break - } - } - if alreadyAdded { - continue - } - - // Add this orphan - orphans = append(orphans, orphanPath) - } - - return orphans -} - // Empty returns true if the state is empty. 
func (s *State) Empty() bool { if s == nil { @@ -443,7 +405,7 @@ func (s *State) removeModule(path []string, v *ModuleState) { func (s *State) removeResource(path []string, v *ResourceState) { // Get the module this resource lives in. If it doesn't exist, we're done. - mod := s.moduleByPath(path) + mod := s.moduleByPath(normalizeModulePath(path)) if mod == nil { return } @@ -487,7 +449,7 @@ func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState // RootModule returns the ModuleState for the root module func (s *State) RootModule() *ModuleState { - root := s.ModuleByPath(rootModulePath) + root := s.ModuleByPath(addrs.RootModuleInstance) if root == nil { panic("missing root module") } @@ -522,7 +484,7 @@ func (s *State) equal(other *State) bool { } for _, m := range s.Modules { // This isn't very optimal currently but works. - otherM := other.moduleByPath(m.Path) + otherM := other.moduleByPath(normalizeModulePath(m.Path)) if otherM == nil { return false } @@ -681,8 +643,8 @@ func (s *State) init() { s.Version = StateVersion } - if s.moduleByPath(rootModulePath) == nil { - s.addModule(rootModulePath) + if s.moduleByPath(addrs.RootModuleInstance) == nil { + s.addModule(addrs.RootModuleInstance) } s.ensureHasLineage() @@ -811,13 +773,9 @@ func (s *State) String() string { // BackendState stores the configuration to connect to a remote backend. type BackendState struct { - Type string `json:"type"` // Backend type - Config map[string]interface{} `json:"config"` // Backend raw config - - // Hash is the hash code to uniquely identify the original source - // configuration. We use this to detect when there is a change in - // configuration even when "type" isn't changed. - Hash uint64 `json:"hash"` + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files } // Empty returns true if BackendState has no state. @@ -825,25 +783,50 @@ func (s *BackendState) Empty() bool { return s == nil || s.Type == "" } -// Rehash returns a unique content hash for this backend's configuration -// as a uint64 value. -// The Hash stored in the backend state needs to match the config itself, but -// we need to compare the backend config after it has been combined with all -// options. -// This function must match the implementation used by config.Backend. -func (s *BackendState) Rehash() uint64 { +// Config decodes the type-specific configuration object using the provided +// schema and returns the result as a cty.Value. +// +// An error is returned if the stored configuration does not conform to the +// given schema. +func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) { + ty := schema.ImpliedType() if s == nil { - return 0 + return cty.NullVal(ty), nil } + return ctyjson.Unmarshal(s.ConfigRaw, ty) +} - cfg := config.Backend{ - Type: s.Type, - RawConfig: &config.RawConfig{ - Raw: s.Config, - }, +// SetConfig replaces (in-place) the type-specific configuration object using +// the provided value and associated schema. +// +// An error is returned if the given value does not conform to the implied +// type of the schema. 
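// An illustrative round-trip, not part of the original patch; the backend
// type and schema here are hypothetical:
//
//	schema := &configschema.Block{
//		Attributes: map[string]*configschema.Attribute{
//			"bucket": {Type: cty.String, Required: true},
//		},
//	}
//	var bs BackendState
//	bs.Type = "s3"
//	val := cty.ObjectVal(map[string]cty.Value{"bucket": cty.StringVal("tf-state")})
//	err := bs.SetConfig(val, schema) // stores the value as JSON in bs.ConfigRaw
//	got, err2 := bs.Config(schema)   // decodes it back; got is cty-equal to val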
+func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error { + ty := schema.ImpliedType() + buf, err := ctyjson.Marshal(val, ty) + if err != nil { + return err } + s.ConfigRaw = buf + return nil +} - return cfg.Rehash() +// ForPlan produces an alternative representation of the receiver that is +// suitable for storing in a plan. The current workspace must additionally +// be provided, to be stored alongside the backend configuration. +// +// The backend configuration schema is required in order to properly +// encode the backend-specific configuration settings. +func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) { + if s == nil { + return nil, nil + } + + configVal, err := s.Config(schema) + if err != nil { + return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err) + } + return plans.NewBackend(s.Type, configVal, schema, workspaceName) } // RemoteState is used to track the information about a remote @@ -1089,58 +1072,64 @@ func (m *ModuleState) IsDescendent(other *ModuleState) bool { // Orphans returns a list of keys of resources that are in the State // but aren't present in the configuration itself. Hence, these keys // represent the state of resources that are orphans. -func (m *ModuleState) Orphans(c *config.Config) []string { +func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance { m.Lock() defer m.Unlock() - keys := make(map[string]struct{}) - for k := range m.Resources { - keys[k] = struct{}{} - } - + inConfig := make(map[string]struct{}) if c != nil { - for _, r := range c.Resources { - delete(keys, r.Id()) - - for k := range keys { - if strings.HasPrefix(k, r.Id()+".") { - delete(keys, k) - } - } + for _, r := range c.ManagedResources { + inConfig[r.Addr().String()] = struct{}{} + } + for _, r := range c.DataResources { + inConfig[r.Addr().String()] = struct{}{} } } - result := make([]string, 0, len(keys)) - for k := range keys { - result = append(result, k) - } + var result []addrs.ResourceInstance + for k := range m.Resources { + // Since we've not yet updated state to use our new address format, + // we need to do some shimming here. + legacyAddr, err := parseResourceAddressInternal(k) + if err != nil { + // Suggests that the user tampered with the state, since we always + // generate valid internal addresses. + log.Printf("ModuleState has invalid resource key %q. Ignoring.", k) + continue + } + addr := legacyAddr.AbsResourceInstanceAddr().Resource + compareKey := addr.Resource.String() // compare by resource address, ignoring instance key + if _, exists := inConfig[compareKey]; !exists { + result = append(result, addr) + } + } return result } // RemovedOutputs returns a list of outputs that are in the State but aren't // present in the configuration itself. -func (m *ModuleState) RemovedOutputs(c *config.Config) []string { - m.Lock() - defer m.Unlock() - - keys := make(map[string]struct{}) - for k := range m.Outputs { - keys[k] = struct{}{} +func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue { + if outputs == nil { + // If we got no output map at all then we'll just treat our set of + // configured outputs as empty, since that suggests that they've all + // been removed by removing their containing module.
+ outputs = make(map[string]*configs.Output) } - if c != nil { - for _, o := range c.Outputs { - delete(keys, o.Name) - } - } + s.Lock() + defer s.Unlock() - result := make([]string, 0, len(keys)) - for k := range keys { - result = append(result, k) + var ret []addrs.OutputValue + for n := range s.Outputs { + if _, declared := outputs[n]; !declared { + ret = append(ret, addrs.OutputValue{ + Name: n, + }) + } } - return result + return ret } // View returns a view with the given resource prefix. @@ -1543,6 +1532,24 @@ func (s *ResourceState) Untaint() { } } +// ProviderAddr returns the provider address for the receiver, by parsing the +// string representation saved in state. An error can be returned if the +// value in state is corrupt. +func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) { + var diags tfdiags.Diagnostics + + str := s.Provider + traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(travDiags) + if travDiags.HasErrors() { + return addrs.AbsProviderConfig{}, diags.Err() + } + + addr, addrDiags := addrs.ParseAbsProviderConfig(traversal) + diags = diags.Append(addrDiags) + return addr, diags.Err() +} + func (s *ResourceState) init() { s.Lock() defer s.Unlock() @@ -1651,6 +1658,51 @@ func (s *InstanceState) init() { s.Ephemeral.init() } +// NewInstanceStateShimmedFromValue is a shim method to lower a new-style +// object value representing the attributes of an instance object into the +// legacy InstanceState representation. +// +// This is for shimming to old components only and should not be used in new code. +func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { + attrs := hcl2shim.FlatmapValueFromHCL2(state) + return &InstanceState{ + ID: attrs["id"], + Attributes: attrs, + Meta: map[string]interface{}{ + "schema_version": schemaVersion, + }, + } +} + +// AttrsAsObjectValue shims from the legacy InstanceState representation to +// a new-style cty object value representation of the state attributes, using +// the given type for guidance. +// +// The given type must be the implied type of the schema of the resource type +// of the object whose state is being converted, or the result is undefined. +// +// This is for shimming from old components only and should not be used in +// new code. +func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { + if s == nil { + // if the state is nil, we need to construct a complete cty.Value with + // null attributes, rather than a single cty.NullVal(ty) + s = &InstanceState{} + } + + if s.Attributes == nil { + s.Attributes = map[string]string{} + } + + // make sure ID is included in the attributes. The InstanceState.ID value + // takes precedence. 
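// (Illustration, not part of the patch: for an implied type
// object({id: string, tags: map(string)}), a legacy flatmap such as
// {"id": "i-abc123", "tags.%": "1", "tags.env": "prod"} converts to the
// corresponding object value; the "id" entry is forced from s.ID just
// below, taking precedence over any stored attribute.)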
+ if s.ID != "" { + s.Attributes["id"] = s.ID + } + + return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) +} + // Copy all the Fields from another InstanceState func (s *InstanceState) Set(from *InstanceState) { s.Lock() @@ -1787,13 +1839,19 @@ func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { } func (s *InstanceState) String() string { + notCreated := "" + + if s == nil { + return notCreated + } + s.Lock() defer s.Unlock() var buf bytes.Buffer - if s == nil || s.ID == "" { - return "" + if s.ID == "" { + return notCreated } buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) @@ -2187,19 +2245,6 @@ func (s moduleStateSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -// StateCompatible returns an error if the state is not compatible with the -// current version of terraform. -func CheckStateVersion(state *State) error { - if state == nil { - return nil - } - - if state.FromFutureTerraform() { - return fmt.Errorf(stateInvalidTerraformVersionErr, state.TFVersion) - } - return nil -} - const stateValidateErrMultiModule = ` Multiple modules with the same path: %s @@ -2208,11 +2253,3 @@ in your state file that point to the same module. This will cause Terraform to behave in unexpected and error prone ways and is invalid. Please back up and modify your state file manually to resolve this. ` - -const stateInvalidTerraformVersionErr = ` -Terraform doesn't allow running any operations against a state -that was written by a future Terraform version. The state is -reporting it is written by Terraform '%s' - -Please run at least that version of Terraform to continue. -` diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go deleted file mode 100644 index 116373032f..0000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_add.go +++ /dev/null @@ -1,374 +0,0 @@ -package terraform - -import "fmt" - -// Add adds the item in the state at the given address. -// -// The item can be a ModuleState, ResourceState, or InstanceState. Depending -// on the item type, the address may or may not be valid. For example, a -// module cannot be moved to a resource address, however a resource can be -// moved to a module address (it retains the same name, under that resource). -// -// The item can also be a []*ModuleState, which is the case for nested -// modules. In this case, Add will expect the zero-index to be the top-most -// module to add and will only nest children from there. For semantics, this -// is equivalent to module => module. -// -// The full semantics of Add: -// -// ┌───────────────────┬───────────────────┬───────────────────┐ -// │ Module Address │ Resource Address │ Instance Address │ -// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤ -// │ ModuleState │ ✓ │ x │ x │ -// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤ -// │ ResourceState │ ✓ │ ✓ │ maybe* │ -// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤ -// │ Instance State │ ✓ │ ✓ │ ✓ │ -// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘ -// -// *maybe - Resources can be added at an instance address only if the resource -// represents a single instance (primary). 
Example: -// "aws_instance.foo" can be moved to "aws_instance.bar.tainted" -// -func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error { - // Parse the address - - toAddr, err := ParseResourceAddress(toAddrRaw) - if err != nil { - return err - } - - // Parse the from address - fromAddr, err := ParseResourceAddress(fromAddrRaw) - if err != nil { - return err - } - - // Determine the types - from := detectValueAddLoc(raw) - to := detectAddrAddLoc(toAddr) - - // Find the function to do this - fromMap, ok := stateAddFuncs[from] - if !ok { - return fmt.Errorf("invalid source to add to state: %T", raw) - } - f, ok := fromMap[to] - if !ok { - return fmt.Errorf("invalid destination: %s (%d)", toAddr, to) - } - - // Call the migrator - if err := f(s, fromAddr, toAddr, raw); err != nil { - return err - } - - // Prune the state - s.prune() - return nil -} - -func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { - // raw can be either *ModuleState or []*ModuleState. The former means - // we're moving just one module. The latter means we're moving a module - // and children. - root := raw - var rest []*ModuleState - if list, ok := raw.([]*ModuleState); ok { - // We need at least one item - if len(list) == 0 { - return fmt.Errorf("module move with no value to: %s", addr) - } - - // The first item is always the root - root = list[0] - if len(list) > 1 { - rest = list[1:] - } - } - - // Get the actual module state - src := root.(*ModuleState).deepcopy() - - // If the target module exists, it is an error - path := append([]string{"root"}, addr.Path...) - if s.ModuleByPath(path) != nil { - return fmt.Errorf("module target is not empty: %s", addr) - } - - // Create it and copy our outputs and dependencies - mod := s.AddModule(path) - mod.Outputs = src.Outputs - mod.Dependencies = src.Dependencies - - // Go through the resources perform an add for each of those - for k, v := range src.Resources { - resourceKey, err := ParseResourceStateKey(k) - if err != nil { - return err - } - - // Update the resource address for this - addrCopy := *addr - addrCopy.Type = resourceKey.Type - addrCopy.Name = resourceKey.Name - addrCopy.Index = resourceKey.Index - addrCopy.Mode = resourceKey.Mode - - // Perform an add - if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil { - return err - } - } - - // Add all the children if we have them - for _, item := range rest { - // If item isn't a descendent of our root, then ignore it - if !src.IsDescendent(item) { - continue - } - - // It is! Strip the leading prefix and attach that to our address - extra := item.Path[len(src.Path):] - addrCopy := addr.Copy() - addrCopy.Path = append(addrCopy.Path, extra...) - - // Add it - s.Add(fromAddr.String(), addrCopy.String(), item) - } - - return nil -} - -func stateAddFunc_Resource_Module( - s *State, from, to *ResourceAddress, raw interface{}) error { - // Build the more specific to addr - addr := *to - addr.Type = from.Type - addr.Name = from.Name - - return s.Add(from.String(), addr.String(), raw) -} - -func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { - // raw can be either *ResourceState or []*ResourceState. The former means - // we're moving just one resource. The latter means we're moving a count - // of resources. 
- if list, ok := raw.([]*ResourceState); ok { - // We need at least one item - if len(list) == 0 { - return fmt.Errorf("resource move with no value to: %s", addr) - } - - // If there is an index, this is an error since we can't assign - // a set of resources to a single index - if addr.Index >= 0 && len(list) > 1 { - return fmt.Errorf( - "multiple resources can't be moved to a single index: "+ - "%s => %s", fromAddr, addr) - } - - // Add each with a specific index - for i, rs := range list { - addrCopy := addr.Copy() - addrCopy.Index = i - - if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil { - return err - } - } - - return nil - } - - src := raw.(*ResourceState).deepcopy() - - // Initialize the resource - resourceRaw, exists := stateAddInitAddr(s, addr) - if exists { - return fmt.Errorf("resource exists and not empty: %s", addr) - } - resource := resourceRaw.(*ResourceState) - resource.Type = src.Type - resource.Dependencies = src.Dependencies - resource.Provider = src.Provider - - // Move the primary - if src.Primary != nil { - addrCopy := *addr - addrCopy.InstanceType = TypePrimary - addrCopy.InstanceTypeSet = true - if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil { - return err - } - } - - // Move all deposed - if len(src.Deposed) > 0 { - resource.Deposed = src.Deposed - } - - return nil -} - -func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { - src := raw.(*InstanceState).DeepCopy() - - // Create the instance - instanceRaw, _ := stateAddInitAddr(s, addr) - instance := instanceRaw.(*InstanceState) - - // Set it - instance.Set(src) - - return nil -} - -func stateAddFunc_Instance_Module( - s *State, from, to *ResourceAddress, raw interface{}) error { - addr := *to - addr.Type = from.Type - addr.Name = from.Name - - return s.Add(from.String(), addr.String(), raw) -} - -func stateAddFunc_Instance_Resource( - s *State, from, to *ResourceAddress, raw interface{}) error { - addr := *to - addr.InstanceType = TypePrimary - addr.InstanceTypeSet = true - - return s.Add(from.String(), addr.String(), raw) -} - -// stateAddFunc is the type of function for adding an item to a state -type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error - -// stateAddFuncs has the full matrix mapping of the state adders. -var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc - -func init() { - stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{ - stateAddModule: { - stateAddModule: stateAddFunc_Module_Module, - }, - stateAddResource: { - stateAddModule: stateAddFunc_Resource_Module, - stateAddResource: stateAddFunc_Resource_Resource, - }, - stateAddInstance: { - stateAddInstance: stateAddFunc_Instance_Instance, - stateAddModule: stateAddFunc_Instance_Module, - stateAddResource: stateAddFunc_Instance_Resource, - }, - } -} - -// stateAddLoc is an enum to represent the location where state is being -// moved from/to. We use this for quick lookups in a function map. -type stateAddLoc uint - -const ( - stateAddInvalid stateAddLoc = iota - stateAddModule - stateAddResource - stateAddInstance -) - -// detectAddrAddLoc detects the state type for the given address. This -// function is specifically not unit tested since we consider the State.Add -// functionality to be comprehensive enough to cover this. 
-func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc { - if addr.Name == "" { - return stateAddModule - } - - if !addr.InstanceTypeSet { - return stateAddResource - } - - return stateAddInstance -} - -// detectValueAddLoc determines the stateAddLoc value from the raw value -// that is some State structure. -func detectValueAddLoc(raw interface{}) stateAddLoc { - switch raw.(type) { - case *ModuleState: - return stateAddModule - case []*ModuleState: - return stateAddModule - case *ResourceState: - return stateAddResource - case []*ResourceState: - return stateAddResource - case *InstanceState: - return stateAddInstance - default: - return stateAddInvalid - } -} - -// stateAddInitAddr takes a ResourceAddress and creates the non-existing -// resources up to that point, returning the empty (or existing) interface -// at that address. -func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) { - addType := detectAddrAddLoc(addr) - - // Get the module - path := append([]string{"root"}, addr.Path...) - exists := true - mod := s.ModuleByPath(path) - if mod == nil { - mod = s.AddModule(path) - exists = false - } - if addType == stateAddModule { - return mod, exists - } - - // Add the resource - resourceKey := (&ResourceStateKey{ - Name: addr.Name, - Type: addr.Type, - Index: addr.Index, - Mode: addr.Mode, - }).String() - exists = true - resource, ok := mod.Resources[resourceKey] - if !ok { - resource = &ResourceState{Type: addr.Type} - resource.init() - mod.Resources[resourceKey] = resource - exists = false - } - if addType == stateAddResource { - return resource, exists - } - - // Get the instance - exists = true - instance := &InstanceState{} - switch addr.InstanceType { - case TypePrimary, TypeTainted: - if v := resource.Primary; v != nil { - instance = resource.Primary - } else { - exists = false - } - case TypeDeposed: - idx := addr.Index - if addr.Index < 0 { - idx = 0 - } - if len(resource.Deposed) > idx { - instance = resource.Deposed[idx] - } else { - resource.Deposed = append(resource.Deposed, instance) - exists = false - } - } - - return instance, exists -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go index 0e47f208a7..fd3f5c7daa 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go @@ -38,13 +38,18 @@ type graphTransformerMulti struct { } func (t *graphTransformerMulti) Transform(g *Graph) error { + var lastStepStr string for _, t := range t.Transforms { + log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t) if err := t.Transform(g); err != nil { return err } - log.Printf( - "[TRACE] Graph after step %T:\n\n%s", - t, g.StringWithNodeTypes()) + if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { + log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s------", t, thisStepStr) + lastStepStr = thisStepStr + } else { + log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t) + } } return nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go index 39cf097aec..897a7e7917 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go +++ 
b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go @@ -1,7 +1,8 @@ package terraform import ( - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" ) // GraphNodeAttachProvider is an interface that must be implemented by nodes @@ -11,8 +12,8 @@ type GraphNodeAttachProvider interface { GraphNodeSubPath // ProviderAddr is the address of the provider configuration. Example: provider.aws. - ProviderName() string + ProviderAddr() addrs.AbsProviderConfig // Sets the configuration - AttachProvider(*config.ProviderConfig) + AttachProvider(*configs.Provider) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go index f2ee37e56b..03f8564d7e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go @@ -1,35 +1,32 @@ package terraform import ( - "fmt" "log" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" ) // GraphNodeAttachResourceConfig is an interface that must be implemented by nodes // that want resource configurations attached. type GraphNodeAttachResourceConfig interface { - // ResourceAddr is the address to the resource - ResourceAddr() *ResourceAddress + GraphNodeResource // Sets the configuration - AttachResourceConfig(*config.Resource) + AttachResourceConfig(*configs.Resource) } // AttachResourceConfigTransformer goes through the graph and attaches -// resource configuration structures to nodes that implement the interfaces -// above. +// resource configuration structures to nodes that implement +// GraphNodeAttachResourceConfig. // // The attached configuration structures are directly from the configuration. // If they're going to be modified, a copy should be made. type AttachResourceConfigTransformer struct { - Module *module.Tree // Module is the root module for the config + Config *configs.Config // Config is the root node in the config tree } func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { - log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...") // Go through and find GraphNodeAttachResource for _, v := range g.Vertices() { @@ -41,36 +38,35 @@ func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { // Determine what we're looking for addr := arn.ResourceAddr() - log.Printf( - "[TRACE] AttachResourceConfigTransformer: Attach resource "+ - "config request: %s", addr) // Get the configuration.
- path := normalizeModulePath(addr.Path) - path = path[1:] - tree := t.Module.Child(path) - if tree == nil { + config := t.Config.DescendentForInstance(addr.Module) + if config == nil { + log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v) continue } - // Go through the resource configs to find the matching config - for _, r := range tree.Config().Resources { - // Get a resource address so we can compare - a, err := parseResourceAddressConfig(r) - if err != nil { - panic(fmt.Sprintf( - "Error parsing config address, this is a bug: %#v", r)) + for _, r := range config.Module.ManagedResources { + rAddr := r.Addr() + + if rAddr != addr.Resource { + // Not the same resource + continue } - a.Path = addr.Path - // If this is not the same resource, then continue - if !a.Equals(addr) { + log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange) + arn.AttachResourceConfig(r) + } + for _, r := range config.Module.DataResources { + rAddr := r.Addr() + + if rAddr != addr.Resource { + // Not the same resource continue } - log.Printf("[TRACE] Attaching resource config: %#v", r) + log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange) arn.AttachResourceConfig(r) - break } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go new file mode 100644 index 0000000000..c7695dd4ee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go @@ -0,0 +1,99 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/dag" +) + +// GraphNodeAttachResourceSchema is an interface implemented by node types +// that need a resource schema attached. +type GraphNodeAttachResourceSchema interface { + GraphNodeResource + GraphNodeProviderConsumer + + AttachResourceSchema(schema *configschema.Block, version uint64) +} + +// GraphNodeAttachProviderConfigSchema is an interface implemented by node types +// that need a provider configuration schema attached. +type GraphNodeAttachProviderConfigSchema interface { + GraphNodeProvider + + AttachProviderConfigSchema(*configschema.Block) +} + +// GraphNodeAttachProvisionerSchema is an interface implemented by node types +// that need one or more provisioner schemas attached. +type GraphNodeAttachProvisionerSchema interface { + ProvisionedBy() []string + + // SetProvisionerSchema is called during transform for each provisioner + // type returned from ProvisionedBy, providing the configuration schema + // for each provisioner in turn. The implementer should save these for + // later use in evaluating provisioner configuration blocks. + AttachProvisionerSchema(name string, schema *configschema.Block) +} + +// AttachSchemaTransformer finds nodes that implement +// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or +// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each +// and then passes them to a method implemented by the node. +type AttachSchemaTransformer struct { + Schemas *Schemas +} + +func (t *AttachSchemaTransformer) Transform(g *Graph) error { + if t.Schemas == nil { + // Should never happen with a reasonable caller, but we'll return a + // proper error here anyway so that we'll fail gracefully. 
+ return fmt.Errorf("AttachSchemaTransformer used with nil Schemas") + } + + for _, v := range g.Vertices() { + + if tv, ok := v.(GraphNodeAttachResourceSchema); ok { + addr := tv.ResourceAddr() + mode := addr.Resource.Mode + typeName := addr.Resource.Type + providerAddr, _ := tv.ProvidedBy() + providerType := providerAddr.ProviderConfig.Type + + schema, version := t.Schemas.ResourceTypeConfig(providerType, mode, typeName) + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v)) + tv.AttachResourceSchema(schema, version) + } + + if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok { + providerAddr := tv.ProviderAddr() + schema := t.Schemas.ProviderConfig(providerAddr.ProviderConfig.Type) + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v)) + tv.AttachProviderConfigSchema(schema) + } + + if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok { + names := tv.ProvisionedBy() + for _, name := range names { + schema := t.Schemas.ProvisionerConfig(name) + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v)) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v)) + tv.AttachProvisionerSchema(name, schema) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go index 564ff08f1f..3af7b989dc 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go @@ -4,64 +4,64 @@ import ( "log" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" ) // GraphNodeAttachResourceState is an interface that can be implemented // to request that a ResourceState is attached to the node. +// +// Due to a historical naming inconsistency, the type ResourceState actually +// represents the state for a particular _instance_, while InstanceState +// represents the values for that instance during a particular phase +// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState +// is supported only for nodes that represent resource instances, even though +// the name might suggest it is for containing resources. type GraphNodeAttachResourceState interface { - // The address to the resource for the state - ResourceAddr() *ResourceAddress + GraphNodeResourceInstance // Sets the state - AttachResourceState(*ResourceState) + AttachResourceState(*states.Resource) } // AttachStateTransformer goes through the graph and attaches // state to nodes that implement the interfaces above. 
type AttachStateTransformer struct { - State *State // State is the root state + State *states.State // State is the root state } func (t *AttachStateTransformer) Transform(g *Graph) error { // If no state, then nothing to do if t.State == nil { - log.Printf("[DEBUG] Not attaching any state: state is nil") + log.Printf("[DEBUG] Not attaching any node states: overall state is nil") return nil } - filter := &StateFilter{State: t.State} for _, v := range g.Vertices() { - // Only care about nodes requesting we're adding state + // Nodes implement this interface to request state attachment. an, ok := v.(GraphNodeAttachResourceState) if !ok { continue } - addr := an.ResourceAddr() + addr := an.ResourceInstanceAddr() - // Get the module state - results, err := filter.Filter(addr.String()) - if err != nil { - return err + rs := t.State.Resource(addr.ContainingResource()) + if rs == nil { + log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr) + continue } - // Attach the first resource state we get - found := false - for _, result := range results { - if rs, ok := result.Value.(*ResourceState); ok { - log.Printf( - "[DEBUG] Attaching resource state to %q: %#v", - dag.VertexName(v), rs) - an.AttachResourceState(rs) - found = true - break - } + is := rs.Instance(addr.Resource.Key) + if is == nil { + // We don't actually need this here, since we'll attach the whole + // resource state, but we still check because it'd be weird + // for the specific instance we're attaching to not to exist. + log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr) + continue } - if !found { - log.Printf( - "[DEBUG] Resource state not found for %q: %s", - dag.VertexName(v), addr) - } + // Make sure to attach a copy of the state, so that each node gets its + // own copy and can modify it without affecting the state attached to + // other nodes that share the same underlying ResourceState. + an.AttachResourceState(rs.DeepCopy()) } return nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go index 61bce8532a..9d3b6f4b49 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go @@ -1,13 +1,11 @@ package terraform import ( - "errors" - "fmt" "log" "sync" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" ) @@ -26,14 +24,14 @@ type ConfigTransformer struct { Concrete ConcreteResourceNodeFunc // Config is the configuration to add resources from. - Module *module.Tree + Config *configs.Config // Unique will only add resources that aren't already present in the graph. Unique bool // Mode will only add resources that match the given mode ModeFilter bool - Mode config.ResourceMode + Mode addrs.ResourceMode l sync.Mutex uniqueMap map[string]struct{} @@ -44,16 +42,11 @@ func (t *ConfigTransformer) Transform(g *Graph) error { t.l.Lock() defer t.l.Unlock() - // If no module is given, we don't do anything - if t.Module == nil { + // If no configuration is available, we don't do anything + if t.Config == nil { return nil } - // If the module isn't loaded, that is simply an error - if !t.Module.Loaded() { - return errors.New("module must be loaded for ConfigTransformer") - } - // Reset the uniqueness map. If we're tracking uniques, then populate // it with addresses.
t.uniqueMap = make(map[string]struct{}) @@ -67,22 +60,22 @@ func (t *ConfigTransformer) Transform(g *Graph) error { } // Start the transformation process - return t.transform(g, t.Module) + return t.transform(g, t.Config) } -func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error { +func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error { // If no config, do nothing - if m == nil { + if config == nil { return nil } // Add our resources - if err := t.transformSingle(g, m); err != nil { + if err := t.transformSingle(g, config); err != nil { return err } // Transform all the children. - for _, c := range m.Children() { + for _, c := range config.Children { if err := t.transform(g, c); err != nil { return err } @@ -91,43 +84,48 @@ func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error { return nil } -func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { - log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path()) - - // Get the configuration for this module - conf := m.Config() - - // Build the path we're at - path := m.Path() +func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error { + path := config.Path + module := config.Module + log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path) + + // For now we assume that each module call produces only one module + // instance with no key, since we don't yet support "count" and "for_each" + // on modules. + // FIXME: As part of supporting "count" and "for_each" on modules, rework + // this so that we'll "expand" the module call first and then create graph + // nodes for each module instance separately. + instPath := path.UnkeyedInstanceShim() + + allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources)) + for _, r := range module.ManagedResources { + allResources = append(allResources, r) + } + for _, r := range module.DataResources { + allResources = append(allResources, r) + } - // Write all the resources out - for _, r := range conf.Resources { - // Build the resource address - addr, err := parseResourceAddressConfig(r) - if err != nil { - panic(fmt.Sprintf( - "Error parsing config address, this is a bug: %#v", r)) - } - addr.Path = path + for _, r := range allResources { + relAddr := r.Addr() - // If this is already in our uniqueness map, don't add it again - if _, ok := t.uniqueMap[addr.String()]; ok { + if t.ModeFilter && relAddr.Mode != t.Mode { + // Skip non-matching modes continue } - // Remove non-matching modes - if t.ModeFilter && addr.Mode != t.Mode { + addr := relAddr.Absolute(instPath) + if _, ok := t.uniqueMap[addr.String()]; ok { + // We've already seen a resource with this address. This should + // never happen, because we enforce uniqueness in the config loader. 
continue } - // Build the abstract node and the concrete one abstract := &NodeAbstractResource{Addr: addr} var node dag.Vertex = abstract if f := t.Concrete; f != nil { node = f(abstract) } - // Add it to the graph g.Add(node) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go index 92f9888d6d..866c917594 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go @@ -1,9 +1,7 @@ package terraform import ( - "errors" - - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" ) @@ -20,54 +18,47 @@ import ( type FlatConfigTransformer struct { Concrete ConcreteResourceNodeFunc // What to turn resources into - Module *module.Tree + Config *configs.Config } func (t *FlatConfigTransformer) Transform(g *Graph) error { - // If no module, we do nothing - if t.Module == nil { + // We have nothing to do if there is no configuration. + if t.Config == nil { return nil } - // If the module is not loaded, that is an error - if !t.Module.Loaded() { - return errors.New("module must be loaded") - } - - return t.transform(g, t.Module) + return t.transform(g, t.Config) } -func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error { - // If no module, no problem - if m == nil { +func (t *FlatConfigTransformer) transform(g *Graph, config *configs.Config) error { + // If we have no configuration then there's nothing to do. + if config == nil { return nil } // Transform all the children. - for _, c := range m.Children() { + for _, c := range config.Children { if err := t.transform(g, c); err != nil { return err } } - // Get the configuration for this module - config := m.Config() - - // Write all the resources out - for _, r := range config.Resources { - // Grab the address for this resource - addr, err := parseResourceAddressConfig(r) - if err != nil { - return err - } - addr.Path = m.Path() + module := config.Module + // For now we assume that each module call produces only one module + // instance with no key, since we don't yet support "count" and "for_each" + // on modules. + // FIXME: As part of supporting "count" and "for_each" on modules, rework + // this so that we'll "expand" the module call first and then create graph + // nodes for each module instance separately. + instPath := config.Path.UnkeyedInstanceShim() - // Build the abstract resource. We have the config already so - // we'll just pre-populate that. 
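// (Illustrative aside, not part of the patch: config.Path here is a static
// addrs.Module such as ["network"], and UnkeyedInstanceShim lowers it to the
// addrs.ModuleInstance module.network with every step keyless, matching the
// single-instance assumption described in the FIXME above.)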
+ for _, r := range module.ManagedResources { + addr := r.Addr().Absolute(instPath) abstract := &NodeAbstractResource{ Addr: addr, Config: r, } var node dag.Vertex = abstract if f := t.Concrete; f != nil { node = f(abstract) diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go index 83415f3525..01601bdda7 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go @@ -1,16 +1,21 @@ package terraform import ( + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" ) // CountBoundaryTransformer adds a node that depends on everything else // so that it runs last in order to clean up the state for nodes that // are on the "count boundary": "foo.0" when only one exists becomes "foo" -type CountBoundaryTransformer struct{} +type CountBoundaryTransformer struct { + Config *configs.Config +} func (t *CountBoundaryTransformer) Transform(g *Graph) error { - node := &NodeCountBoundary{} + node := &NodeCountBoundary{ + Config: t.Config, + } g.Add(node) // Depends on everything diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go deleted file mode 100644 index 87a1f9c987..0000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go +++ /dev/null @@ -1,178 +0,0 @@ -package terraform - -import "fmt" - -// DeposedTransformer is a GraphTransformer that adds deposed resources -// to the graph. -type DeposedTransformer struct { - // State is the global state. We'll automatically find the correct - // ModuleState based on the Graph.Path that is being transformed. - State *State - - // View, if non-empty, is the ModuleState.View used around the state - // to find deposed resources. - View string - - // The provider used by the resourced which were deposed - ResolvedProvider string -} - -func (t *DeposedTransformer) Transform(g *Graph) error { - state := t.State.ModuleByPath(g.Path) - if state == nil { - // If there is no state for our module there can't be any deposed - // resources, since they live in the state. - return nil - } - - // If we have a view, apply it now - if t.View != "" { - state = state.View(t.View) - } - - // Go through all the resources in our state to look for deposed resources - for k, rs := range state.Resources { - // If we have no deposed resources, then move on - if len(rs.Deposed) == 0 { - continue - } - - deposed := rs.Deposed - - for i, _ := range deposed { - g.Add(&graphNodeDeposedResource{ - Index: i, - ResourceName: k, - ResourceType: rs.Type, - ProviderName: rs.Provider, - ResolvedProvider: t.ResolvedProvider, - }) - } - } - - return nil -} - -// graphNodeDeposedResource is the graph vertex representing a deposed resource. -type graphNodeDeposedResource struct { - Index int - ResourceName string - ResourceType string - ProviderName string - ResolvedProvider string -} - -func (n *graphNodeDeposedResource) Name() string { - return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index) -} - -func (n *graphNodeDeposedResource) ProvidedBy() string { - return resourceProvider(n.ResourceName, n.ProviderName) -} - -func (n *graphNodeDeposedResource) SetProvider(p string) { - n.ResolvedProvider = p -} - -// GraphNodeEvalable impl.
-func (n *graphNodeDeposedResource) EvalTree() EvalNode { - var provider ResourceProvider - var state *InstanceState - - seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} - - // Build instance info - info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType} - seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info}) - - // Refresh the resource - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n.ResolvedProvider, - Output: &provider, - }, - &EvalReadStateDeposed{ - Name: n.ResourceName, - Output: &state, - Index: n.Index, - }, - &EvalRefresh{ - Info: info, - Provider: &provider, - State: &state, - Output: &state, - }, - &EvalWriteStateDeposed{ - Name: n.ResourceName, - ResourceType: n.ResourceType, - Provider: n.ResolvedProvider, - State: &state, - Index: n.Index, - }, - }, - }, - }) - - // Apply - var diff *InstanceDiff - var err error - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n.ResolvedProvider, - Output: &provider, - }, - &EvalReadStateDeposed{ - Name: n.ResourceName, - Output: &state, - Index: n.Index, - }, - &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, - }, - // Call pre-apply hook - &EvalApplyPre{ - Info: info, - State: &state, - Diff: &diff, - }, - &EvalApply{ - Info: info, - State: &state, - Diff: &diff, - Provider: &provider, - Output: &state, - Error: &err, - }, - // Always write the resource back to the state deposed... if it - // was successfully destroyed it will be pruned. If it was not, it will - // be caught on the next run. - &EvalWriteStateDeposed{ - Name: n.ResourceName, - ResourceType: n.ResourceType, - Provider: n.ResolvedProvider, - State: &state, - Index: n.Index, - }, - &EvalApplyPost{ - Info: info, - State: &state, - Error: &err, - }, - &EvalReturnError{ - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - }, - }) - - return seq -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go index edfb460bfd..2f4d5edeba 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go @@ -4,15 +4,15 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" ) // GraphNodeDestroyerCBD must be implemented by nodes that might be -// create-before-destroy destroyers. +// create-before-destroy destroyers, or might plan a create-before-destroy +// action. type GraphNodeDestroyerCBD interface { - GraphNodeDestroyer - // CreateBeforeDestroy returns true if this node represents a node // that is doing a CBD. CreateBeforeDestroy() bool @@ -23,6 +23,89 @@ type GraphNodeDestroyerCBD interface { ModifyCreateBeforeDestroy(bool) error } +// GraphNodeAttachDestroyer is implemented by applyable nodes that have a +// companion destroy node. This allows the creation node to look up the status +// of the destroy node and determine if it needs to depose the existing state, +// or replace it. +// If a node is not marked as create-before-destroy in the configuration, but a +// dependency forces that status, only the destroy node will be aware of that +// status. 
+type GraphNodeAttachDestroyer interface { + // AttachDestroyNode takes a destroy node and saves a reference to that + // node in the receiver, so it can later check the status of + // CreateBeforeDestroy(). + AttachDestroyNode(n GraphNodeDestroyerCBD) +} + +// ForcedCBDTransformer detects when a particular CBD-able graph node has +// dependencies with another that has create_before_destroy set that require +// it to be forced on, and forces it on. +// +// This must be used in the plan graph builder to ensure that +// create_before_destroy settings are properly propagated before constructing +// the planned changes. This requires that the plannable resource nodes +// implement GraphNodeDestroyerCBD. +type ForcedCBDTransformer struct { +} + +func (t *ForcedCBDTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + dn, ok := v.(GraphNodeDestroyerCBD) + if !ok { + continue + } + + if !dn.CreateBeforeDestroy() { + // If there are no CBD descendents (dependent nodes), then we + // do nothing here. + if !t.hasCBDDescendent(g, v) { + log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v) + continue + } + + // If this isn't naturally a CBD node, this means that a descendent is + // and we need to auto-upgrade this node to CBD. We do this because + // a CBD node depending on non-CBD will result in cycles. To avoid this, + // we always attempt to upgrade it. + log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v) + if err := dn.ModifyCreateBeforeDestroy(true); err != nil { + return fmt.Errorf( + "%s: must have create before destroy enabled because "+ + "a dependent resource has CBD enabled. However, when "+ + "attempting to automatically do this, an error occurred: %s", + dag.VertexName(v), err) + } + } else { + log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v) + } + } + return nil +} + +// hasCBDDescendent returns true if any descendent (node that depends on this) +// has CBD set. +func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool { + s, _ := g.Descendents(v) + if s == nil { + return true + } + + for _, ov := range s.List() { + dn, ok := ov.(GraphNodeDestroyerCBD) + if !ok { + continue + } + + if dn.CreateBeforeDestroy() { + // some descendent is CreateBeforeDestroy, so we need to follow suit + log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov)) + return true + } + } + + return false +} + // CBDEdgeTransformer modifies the edges of CBD nodes that went through // the DestroyEdgeTransformer to have the right dependencies. There are // two real tasks here: @@ -35,16 +118,25 @@ type GraphNodeDestroyerCBD interface { // update to A. Example: adding a web server updates the load balancer // before deleting the old web server. // +// This transformer requires that a previous transformer has already forced +// create_before_destroy on for nodes that are depended on by explicit CBD +// nodes. This is the logic in ForcedCBDTransformer, though in practice we +// will get here by recording the CBD-ness of each change in the plan during +// the plan walk and then forcing the nodes into the appropriate setting during +// DiffTransformer when building the apply graph. type CBDEdgeTransformer struct { // Config and State are only needed to look up dependencies in // any way possible. Either can be nil if not available.
- Module *module.Tree - State *State + Config *configs.Config + State *states.State + + // If configuration is present then Schemas is required in order to + // obtain schema information from providers and provisioners so we can + // properly resolve implicit dependencies. + Schemas *Schemas } func (t *CBDEdgeTransformer) Transform(g *Graph) error { - log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...") - // Go through and reverse any destroy edges destroyMap := make(map[string][]dag.Vertex) for _, v := range g.Vertices() { @@ -52,25 +144,13 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error { if !ok { continue } + dern, ok := v.(GraphNodeDestroyer) + if !ok { + continue + } if !dn.CreateBeforeDestroy() { - // If there are no CBD ancestors (dependent nodes), then we - // do nothing here. - if !t.hasCBDAncestor(g, v) { - continue - } - - // If this isn't naturally a CBD node, this means that an ancestor is - // and we need to auto-upgrade this node to CBD. We do this because - // a CBD node depending on non-CBD will result in cycles. To avoid this, - // we always attempt to upgrade it. - if err := dn.ModifyCreateBeforeDestroy(true); err != nil { - return fmt.Errorf( - "%s: must have create before destroy enabled because "+ - "a dependent resource has CBD enabled. However, when "+ - "attempting to automatically do this, an error occurred: %s", - dag.VertexName(v), err) - } + continue } // Find the destroy edge. There should only be one. @@ -86,7 +166,9 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error { // Found it! Invert. g.RemoveEdge(de) - g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()}) + applyNode := de.Source() + destroyNode := de.Target() + g.Connect(&DestroyEdge{S: destroyNode, T: applyNode}) } // If the address has an index, we strip that. Our depMap creation @@ -94,15 +176,11 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error { // dependencies. One day when we limit dependencies more exactly // this will have to change. We have a test case covering this // (depNonCBDCountBoth) so it'll be caught. - addr := dn.DestroyAddr() - if addr.Index >= 0 { - addr = addr.Copy() // Copy so that we don't modify any pointers - addr.Index = -1 - } + addr := dern.DestroyAddr() + key := addr.ContainingResource().String() // Add this to the list of nodes that we need to fix up // the edges for (step 2 above in the docs). - key := addr.String() destroyMap[key] = append(destroyMap[key], v) } @@ -151,13 +229,9 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error { // dependencies. One day when we limit dependencies more exactly // this will have to change. We have a test case covering this // (depNonCBDCount) so it'll be caught. - if addr.Index >= 0 { - addr = addr.Copy() // Copy so that we don't modify any pointers - addr.Index = -1 - } + key := addr.ContainingResource().String() // If there is nothing this resource should depend on, ignore it - key := addr.String() dns, ok := depMap[key] if !ok { continue @@ -174,21 +248,21 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error { return nil } -func (t *CBDEdgeTransformer) depMap( - destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) { +func (t *CBDEdgeTransformer) depMap(destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) { // Build the graph of our config, this ensures that all resources // are present in the graph. 
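The edge inversion a few lines above is the graph-level heart of create_before_destroy: the usual destroy-then-create ordering edge is removed and re-added pointing the other way. A toy sketch of that flip, where the edge direction is modeled simply as "source waits on target" (an assumption made here for illustration):

package main

import "fmt"

// edge models a dependency: source waits for target to complete first.
type edge struct{ source, target string }

// invertForCBD flips a destroy edge: instead of the new object waiting on
// the destroy, the destroy waits on the new object being created.
func invertForCBD(e edge) edge {
	return edge{source: e.target, target: e.source}
}

func main() {
	// Default replace order: create waits for destroy (destroy-then-create).
	e := edge{source: "web (create)", target: "web (destroy)"}
	fmt.Printf("before: %q waits on %q\n", e.source, e.target)

	// With create_before_destroy, the edge is removed and re-added inverted.
	e = invertForCBD(e)
	fmt.Printf("after:  %q waits on %q\n", e.source, e.target)
}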
- g, err := (&BasicGraphBuilder{ + g, diags := (&BasicGraphBuilder{ Steps: []GraphTransformer{ - &FlatConfigTransformer{Module: t.Module}, - &AttachResourceConfigTransformer{Module: t.Module}, + &FlatConfigTransformer{Config: t.Config}, + &AttachResourceConfigTransformer{Config: t.Config}, &AttachStateTransformer{State: t.State}, + &AttachSchemaTransformer{Schemas: t.Schemas}, &ReferenceTransformer{}, }, Name: "CBDEdgeTransformer", }).Build(nil) - if err != nil { - return nil, err + if diags.HasErrors() { + return nil, diags.Err() } // Using this graph, build the list of destroy nodes that each resource @@ -232,26 +306,3 @@ func (t *CBDEdgeTransformer) depMap( return depMap, nil } - -// hasCBDAncestor returns true if any ancestor (node that depends on this) -// has CBD set. -func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool { - s, _ := g.Ancestors(v) - if s == nil { - return true - } - - for _, v := range s.List() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if dn.CreateBeforeDestroy() { - // some ancestor is CreateBeforeDestroy, so we need to follow suit - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go index a06ff292e2..7fb415bdfc 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go @@ -3,7 +3,10 @@ package terraform import ( "log" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" ) @@ -11,16 +14,16 @@ import ( type GraphNodeDestroyer interface { dag.Vertex - // ResourceAddr is the address of the resource that is being + // DestroyAddr is the address of the resource that is being // destroyed by this node. If this returns nil, then this node // is not destroying anything. - DestroyAddr() *ResourceAddress + DestroyAddr() *addrs.AbsResourceInstance } // GraphNodeCreator must be implemented by nodes that create OR update resources. type GraphNodeCreator interface { - // ResourceAddr is the address of the resource being created or updated - CreateAddr() *ResourceAddress + // CreateAddr is the address of the resource being created or updated + CreateAddr() *addrs.AbsResourceInstance } // DestroyEdgeTransformer is a GraphTransformer that creates the proper @@ -40,33 +43,37 @@ type GraphNodeCreator interface { type DestroyEdgeTransformer struct { // These are needed to properly build the graph of dependencies // to determine what a destroy node depends on. Any of these can be nil. - Module *module.Tree - State *State + Config *configs.Config + State *states.State + + // If configuration is present then Schemas is required in order to + // obtain schema information from providers and provisioners in order + // to properly resolve implicit dependencies. + Schemas *Schemas } func (t *DestroyEdgeTransformer) Transform(g *Graph) error { - log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...") - // Build a map of what is being destroyed (by address string) to - // the list of destroyers. In general there will only be one destroyer - // but to make it more robust we support multiple. + // the list of destroyers. 
Usually there will be at most one destroyer + // per node, but we allow multiple if present for completeness. destroyers := make(map[string][]GraphNodeDestroyer) + destroyerAddrs := make(map[string]addrs.AbsResourceInstance) for _, v := range g.Vertices() { dn, ok := v.(GraphNodeDestroyer) if !ok { continue } - addr := dn.DestroyAddr() - if addr == nil { + addrP := dn.DestroyAddr() + if addrP == nil { continue } + addr := *addrP key := addr.String() - log.Printf( - "[TRACE] DestroyEdgeTransformer: %s destroying %q", - dag.VertexName(dn), key) + log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(dn), v, key) destroyers[key] = append(destroyers[key], dn) + destroyerAddrs[key] = addr } // If we aren't destroying anything, there will be no edges to make @@ -100,10 +107,20 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { a := v log.Printf( - "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s", + "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q", dag.VertexName(a), dag.VertexName(a_d)) g.Connect(&DestroyEdge{S: a, T: a_d}) + + // Attach the destroy node to the creator + // There really shouldn't be more than one destroyer, but even if + // there are, any of them will represent the correct + // CreateBeforeDestroy status. + if n, ok := cn.(GraphNodeAttachDestroyer); ok { + if d, ok := d.(GraphNodeDestroyerCBD); ok { + n.AttachDestroyNode(d) + } + } } } @@ -120,20 +137,24 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { } steps := []GraphTransformer{ // Add the local values - &LocalTransformer{Module: t.Module}, + &LocalTransformer{Config: t.Config}, // Add outputs and metadata - &OutputTransformer{Module: t.Module}, - &AttachResourceConfigTransformer{Module: t.Module}, + &OutputTransformer{Config: t.Config}, + &AttachResourceConfigTransformer{Config: t.Config}, &AttachStateTransformer{State: t.State}, - TransformProviders(nil, providerFn, t.Module), - // Add all the variables. We can depend on resources through // variables due to module parameters, and we need to properly // determine that. - &RootVariableTransformer{Module: t.Module}, - &ModuleVariableTransformer{Module: t.Module}, + &RootVariableTransformer{Config: t.Config}, + &ModuleVariableTransformer{Config: t.Config}, + + TransformProviders(nil, providerFn, t.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: t.Schemas}, &ReferenceTransformer{}, } @@ -146,37 +167,36 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { // var tempG Graph var tempDestroyed []dag.Vertex - for d, _ := range destroyers { - // d is what is being destroyed. We parse the resource address - // which it came from it is a panic if this fails. - addr, err := ParseResourceAddress(d) - if err != nil { - panic(err) - } + for d := range destroyers { + // d is the string key for the resource being destroyed. We actually + // want the address value, which we stashed earlier. + addr := destroyerAddrs[d] // This part is a little bit weird but is the best way to // find the dependencies we need to: build a graph and use the // attach config and state transformers then ask for references. 
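The scratch-graph trick described above is worth a note: rather than re-deriving dependency rules, the transformer builds a second, throwaway graph from the same config and state, runs ordinary transform steps over it, and then reads the references off the result. A minimal sketch of that shape, with the graph and transformer types reduced to invented stand-ins:

package main

import "fmt"

// graph and transformer are simplified stand-ins for Graph/GraphTransformer.
type graph struct{ nodes []string }

type transformer interface{ Transform(g *graph) error }

// addNodeStep is a toy transform step that just records a node.
type addNodeStep struct{ name string }

func (s addNodeStep) Transform(g *graph) error {
	g.nodes = append(g.nodes, s.name)
	return nil
}

func main() {
	// Build a temporary graph purely for analysis; it is discarded afterwards.
	var tempG graph
	steps := []transformer{addNodeStep{"aws_instance.web"}, addNodeStep{"aws_eip.web"}}
	for _, s := range steps {
		if err := s.Transform(&tempG); err != nil {
			fmt.Println("transform failed:", err)
			return
		}
	}
	fmt.Println("nodes available for reference analysis:", tempG.nodes)
}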
- abstract := &NodeAbstractResource{Addr: addr} + abstract := NewNodeAbstractResourceInstance(addr) tempG.Add(abstract) tempDestroyed = append(tempDestroyed, abstract) // We also add the destroy version here since the destroy can // depend on things that the creation doesn't (destroy provisioners). - destroy := &NodeDestroyResource{NodeAbstractResource: abstract} + destroy := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: abstract} tempG.Add(destroy) tempDestroyed = append(tempDestroyed, destroy) } // Run the graph transforms so we have the information we need to // build references. + log.Printf("[TRACE] DestroyEdgeTransformer: constructing temporary graph for analysis of references, starting from:\n%s", tempG.StringWithNodeTypes()) for _, s := range steps { + log.Printf("[TRACE] DestroyEdgeTransformer: running %T on temporary graph", s) if err := s.Transform(&tempG); err != nil { + log.Printf("[TRACE] DestroyEdgeTransformer: %T failed: %s", s, err) return err } } - - log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String()) + log.Printf("[TRACE] DestroyEdgeTransformer: temporary reference graph:\n%s", tempG.String()) // Go through all the nodes in the graph and determine what they // depend on. @@ -207,16 +227,13 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { // Get the destroy node for this. In the example of our struct, // we are currently at B and we're looking for B_d. - rn, ok := v.(GraphNodeResource) + rn, ok := v.(GraphNodeResourceInstance) if !ok { + log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s, since it's not a resource", dag.VertexName(v)) continue } - addr := rn.ResourceAddr() - if addr == nil { - continue - } - + addr := rn.ResourceInstanceAddr() dns := destroyers[addr.String()] // We have dependencies, check if any are being destroyed @@ -231,16 +248,12 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { // to see if A_d exists. var depDestroyers []dag.Vertex for _, v := range refs { - rn, ok := v.(GraphNodeResource) + rn, ok := v.(GraphNodeResourceInstance) if !ok { continue } - addr := rn.ResourceAddr() - if addr == nil { - continue - } - + addr := rn.ResourceInstanceAddr() key := addr.String() if ds, ok := destroyers[key]; ok { for _, d := range ds { @@ -257,6 +270,7 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { for _, a_d := range dns { for _, b_d := range depDestroyers { if b_d != a_d { + log.Printf("[TRACE] DestroyEdgeTransformer: %q depends on %q", dag.VertexName(b_d), dag.VertexName(a_d)) g.Connect(dag.BasicEdge(b_d, a_d)) } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go index ad46d3c612..6fb915f871 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go @@ -4,83 +4,189 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) -// DiffTransformer is a GraphTransformer that adds the elements of -// the diff to the graph. -// -// This transform is used for example by the ApplyGraphBuilder to ensure -// that only resources that are being modified are represented in the graph. 
-// -// Module and State is still required for the DiffTransformer for annotations -// since the Diff doesn't contain all the information required to build the -// complete graph (such as create-before-destroy information). The graph -// is built based on the diff first, though, ensuring that only resources -// that are being modified are present in the graph. +// DiffTransformer is a GraphTransformer that adds graph nodes representing +// each of the resource changes described in the given Changes object. type DiffTransformer struct { - Concrete ConcreteResourceNodeFunc - - Diff *Diff - Module *module.Tree - State *State + Concrete ConcreteResourceInstanceNodeFunc + State *states.State + Changes *plans.Changes } func (t *DiffTransformer) Transform(g *Graph) error { - // If the diff is nil or empty (nil is empty) then do nothing - if t.Diff.Empty() { + if t.Changes == nil || len(t.Changes.Resources) == 0 { + // Nothing to do! return nil } // Go through all the resource changes in the plan. - log.Printf("[TRACE] DiffTransformer: starting") - var nodes []dag.Vertex - for _, m := range t.Diff.Modules { - log.Printf("[TRACE] DiffTransformer: Module: %s", m) - // TODO: If this is a destroy diff then add a module destroy node - - // Go through all the resources in this module. - for name, inst := range m.Resources { - log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst) - - // We have changes! This is a create or update operation. - // First grab the address so we have a unique way to - // reference this resource. - addr, err := parseResourceAddressInternal(name) - if err != nil { - panic(fmt.Sprintf( - "Error parsing internal name, this is a bug: %q", name)) - } + log.Printf("[TRACE] DiffTransformer starting") + + var diags tfdiags.Diagnostics + state := t.State + changes := t.Changes + + // DiffTransformer creates resource _instance_ nodes. If there are any + // whole-resource nodes already in the graph, we must ensure that they + // get evaluated before any of the corresponding instances by creating + // dependency edges, so we'll do some prep work here to ensure we'll only + // create connections to nodes that existed before we started here. + resourceNodes := map[string][]GraphNodeResource{} + for _, node := range g.Vertices() { + rn, ok := node.(GraphNodeResource) + if !ok { + continue + } + // We ignore any instances that _also_ implement + // GraphNodeResourceInstance, since in the unlikely event that they + // do exist we'd probably end up creating cycles by connecting them. + if _, ok := node.(GraphNodeResourceInstance); ok { + continue + } + + addr := rn.ResourceAddr().String() + resourceNodes[addr] = append(resourceNodes[addr], rn) + } + + for _, rc := range changes.Resources { + addr := rc.Addr + dk := rc.DeposedKey + + log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk) + + // Depending on the action we'll need some different combinations of + // nodes, because destroying uses a special node type separate from + // other actions.
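The switch that follows decomposes each planned action into up to three booleans that decide which graph nodes get created. A standalone sketch of that mapping, using an invented action type in place of the plans package:

package main

import "fmt"

// action is a stand-in for plans.Action.
type action string

const (
	noOp             action = "no-op"
	del              action = "delete"
	deleteThenCreate action = "delete-then-create"
	createThenDelete action = "create-then-delete"
	createOrUpdate   action = "create-or-update"
)

// nodeFlags reports which graph nodes a change needs: an apply node, a
// destroy node, or both, and whether the pair runs create-before-destroy.
func nodeFlags(a action) (update, destroy, createBeforeDestroy bool) {
	switch a {
	case noOp:
		return false, false, false
	case del:
		return false, true, false
	case deleteThenCreate, createThenDelete:
		return true, true, a == createThenDelete
	default:
		return true, false, false
	}
}

func main() {
	for _, a := range []action{noOp, del, deleteThenCreate, createThenDelete, createOrUpdate} {
		u, d, cbd := nodeFlags(a)
		fmt.Printf("%-19s update=%-5v destroy=%-5v cbd=%v\n", a, u, d, cbd)
	}
}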
+ var update, delete, createBeforeDestroy bool + switch rc.Action { + case plans.NoOp: + continue + case plans.Delete: + delete = true + case plans.DeleteThenCreate, plans.CreateThenDelete: + update = true + delete = true + createBeforeDestroy = (rc.Action == plans.CreateThenDelete) + default: + update = true + } + + if dk != states.NotDeposed && update { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid planned change for deposed object", + fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk), + )) + continue + } - // Very important: add the module path for this resource to - // the address. Remove "root" from it. - addr.Path = m.Path[1:] + // If we're going to do a create_before_destroy Replace operation then + // we need to allocate a DeposedKey to use to retain the + // not-yet-destroyed prior object, so that the delete node can destroy + // _that_ rather than the newly-created node, which will be current + // by the time the delete node is visited. + if update && delete && createBeforeDestroy { + // In this case, variable dk will be the _pre-assigned_ DeposedKey + // that must be used if the update graph node deposes the current + // instance, which will then align with the same key we pass + // into the destroy node to ensure we destroy exactly the deposed + // object we expect. + if state != nil { + ris := state.ResourceInstance(addr) + if ris == nil { + // Should never happen, since we don't plan to replace an + // instance that doesn't exist yet. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid planned change", + fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in Terraform.", addr), + )) + continue + } + + // Allocating a deposed key separately from using it can be racy + // in general, but we assume here that nothing except the apply + // node we instantiate below will actually make new deposed objects + // in practice, and so the set of already-used keys will not change + // between now and then. + dk = ris.FindUnusedDeposedKey() + } else { + // If we have no state at all yet then we can use _any_ + // DeposedKey. + dk = states.NewDeposedKey() + } + } - // If we're destroying, add the destroy node - if inst.Destroy || inst.GetDestroyDeposed() { - abstract := &NodeAbstractResource{Addr: addr} - g.Add(&NodeDestroyResource{NodeAbstractResource: abstract}) + if update { + // All actions except destroying use the node type chosen by t.Concrete + abstract := NewNodeAbstractResourceInstance(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) } - // If we have changes, then add the applyable version - if len(inst.Attributes) > 0 { - // Add the resource to the graph - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) + if createBeforeDestroy { + // We'll attach our pre-allocated DeposedKey to the node if + // it supports that. NodeApplyableResourceInstance is the + // specific concrete node type we are looking for here really, + // since that's the only node type that might depose objects.
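The deposed-key handshake described above is subtle enough to merit a toy model: the apply node files the still-live old object away under a key agreed in advance, the replacement becomes current, and the destroy node later deletes exactly the object under that key. A self-contained sketch, with an invented state type (not Terraform's states package):

package main

import "fmt"

// instanceState is an invented model: one current object plus a map of
// deposed objects keyed by their deposed key.
type instanceState struct {
	current string
	deposed map[string]string
}

// depose files the current object away under the pre-allocated key.
func (s *instanceState) depose(key string) {
	if s.deposed == nil {
		s.deposed = map[string]string{}
	}
	s.deposed[key] = s.current
	s.current = ""
}

func main() {
	s := &instanceState{current: "old-object-id"}
	key := "00000001" // allocated up front, before the graph walk begins

	// Apply node: set the old object aside, then make the replacement current.
	s.depose(key)
	s.current = "new-object-id"

	// Destroy node: destroy exactly the object filed under the agreed key,
	// never the freshly created current object.
	fmt.Println("destroying deposed object", s.deposed[key])
	delete(s.deposed, key)
	fmt.Println("current object is still", s.current)
}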
+ if dn, ok := node.(GraphNodeDeposer); ok { + dn.SetPreallocatedDeposedKey(dk) } + log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk) + } else { + log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node)) + } - nodes = append(nodes, node) + g.Add(node) + rsrcAddr := addr.ContainingResource().String() + for _, rsrcNode := range resourceNodes[rsrcAddr] { + g.Connect(dag.BasicEdge(node, rsrcNode)) + } + } + + if delete { + // Destroying always uses a destroy-specific node type, though + // which one depends on whether we're destroying a current object + // or a deposed object. + var node GraphNodeResourceInstance + abstract := NewNodeAbstractResourceInstance(addr) + if dk == states.NotDeposed { + node = &NodeDestroyResourceInstance{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + node.(*NodeDestroyResourceInstance).ModifyCreateBeforeDestroy(createBeforeDestroy) + } else { + node = &NodeDestroyDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + } + if dk == states.NotDeposed { + log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node)) + } else { + log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node)) + } + g.Add(node) + rsrcAddr := addr.ContainingResource().String() + for _, rsrcNode := range resourceNodes[rsrcAddr] { + // We connect this edge "forwards" (even though destroy dependencies + // are often inverted) because evaluating the resource node + // after the destroy node could cause an unnecessary husk of + // a resource state to be re-added. + g.Connect(dag.BasicEdge(node, rsrcNode)) } } - } - // Add all the nodes to the graph - for _, n := range nodes { - g.Add(n) } - return nil + log.Printf("[TRACE] DiffTransformer complete") + + return diags.Err() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go index 3673771ca2..c1945f02d3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go @@ -2,7 +2,10 @@ package terraform import ( "fmt" - "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" ) // ImportProviderValidateTransformer is a GraphTransformer that goes through @@ -10,6 +13,8 @@ import ( type ImportProviderValidateTransformer struct{} func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { + var diags tfdiags.Diagnostics + for _, v := range g.Vertices() { // We only care about providers pv, ok := v.(GraphNodeProvider) @@ -24,15 +29,16 @@ func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { } for _, ref := range rn.References() { - if !strings.HasPrefix(ref, "var.") { - return fmt.Errorf( - "Provider %q depends on non-var %q. Providers for import can currently\n"+ - "only depend on variables or must be hardcoded. 
You can stop import\n"+ - "from loading configurations by specifying `-config=\"\"`.", - pv.ProviderName(), ref) + if _, ok := ref.Subject.(addrs.InputVariable); !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider dependency for import", + Detail: fmt.Sprintf("The configuration for %s depends on %s. Providers used with import must either have literal configuration or refer only to input variables.", pv.ProviderAddr(), ref.Subject.String()), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) } } } - return nil + return diags.Err() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go index fcbff653f5..ab0ecae0a9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go @@ -2,6 +2,10 @@ package terraform import ( "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" ) // ImportStateTransformer is a GraphTransformer that adds nodes to the @@ -11,64 +15,68 @@ type ImportStateTransformer struct { } func (t *ImportStateTransformer) Transform(g *Graph) error { - nodes := make([]*graphNodeImportState, 0, len(t.Targets)) for _, target := range t.Targets { - addr, err := ParseResourceAddress(target.Addr) - if err != nil { - return fmt.Errorf( - "failed to parse resource address '%s': %s", - target.Addr, err) + // The ProviderAddr may not be supplied for non-aliased providers. + // This will be populated if the targets come from the cli, but tests + // may not specify implied provider addresses. + providerAddr := target.ProviderAddr + if providerAddr.ProviderConfig.Type == "" { + providerAddr = target.Addr.Resource.Resource.DefaultProviderConfig().Absolute(target.Addr.Module) } - nodes = append(nodes, &graphNodeImportState{ - Addr: addr, + node := &graphNodeImportState{ + Addr: target.Addr, ID: target.ID, - ProviderName: target.Provider, - }) - } - - // Build the graph vertices - for _, n := range nodes { - g.Add(n) + ProviderAddr: providerAddr, + } + g.Add(node) } - return nil } type graphNodeImportState struct { - Addr *ResourceAddress // Addr is the resource address to import to - ID string // ID is the ID to import as - ProviderName string // Provider string - ResolvedProvider string // provider node address + Addr addrs.AbsResourceInstance // Addr is the resource address to import into + ID string // ID is the ID to import as + ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type + ResolvedProvider addrs.AbsProviderConfig // provider node address after resolution - states []*InstanceState + states []providers.ImportedResource } +var ( + _ GraphNodeSubPath = (*graphNodeImportState)(nil) + _ GraphNodeEvalable = (*graphNodeImportState)(nil) + _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil) + _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil) +) + func (n *graphNodeImportState) Name() string { - return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID) + return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID) } -func (n *graphNodeImportState) ProvidedBy() string { - return resourceProvider(n.Addr.Type, n.ProviderName) +// GraphNodeProviderConsumer +func (n *graphNodeImportState) ProvidedBy() (addrs.AbsProviderConfig, bool) { + // We assume that n.ProviderAddr has been properly populated 
here. + // It's the responsibility of the code creating a graphNodeImportState + // to populate this, possibly by calling DefaultProviderConfig() on the + // resource address to infer an implied provider from the resource type + // name. + return n.ProviderAddr, false } -func (n *graphNodeImportState) SetProvider(p string) { - n.ResolvedProvider = p +// GraphNodeProviderConsumer +func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) { + n.ResolvedProvider = addr } // GraphNodeSubPath -func (n *graphNodeImportState) Path() []string { - return normalizeModulePath(n.Addr.Path) +func (n *graphNodeImportState) Path() addrs.ModuleInstance { + return n.Addr.Module } // GraphNodeEvalable impl. func (n *graphNodeImportState) EvalTree() EvalNode { - var provider ResourceProvider - info := &InstanceInfo{ - Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name), - ModulePath: n.Path(), - Type: n.Addr.Type, - } + var provider providers.Interface // Reset our states n.states = nil @@ -77,13 +85,13 @@ func (n *graphNodeImportState) EvalTree() EvalNode { return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ResolvedProvider, + Addr: n.ResolvedProvider, Output: &provider, }, &EvalImportState{ + Addr: n.Addr.Resource, Provider: &provider, - Info: info, - Id: n.ID, + ID: n.ID, Output: &n.states, }, }, @@ -97,6 +105,8 @@ func (n *graphNodeImportState) EvalTree() EvalNode { // resources they don't depend on anything else and refreshes are isolated // so this is nearly a perfect use case for dynamic expand. func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { + var diags tfdiags.Diagnostics + g := &Graph{Path: ctx.Path()} // nameCounter is used to de-dup names in the state. @@ -105,11 +115,11 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { // Compile the list of addresses that we'll be inserting into the state. // We do this ahead of time so we can verify that we aren't importing // something that already exists. - addrs := make([]*ResourceAddress, len(n.states)) + addrs := make([]addrs.AbsResourceInstance, len(n.states)) for i, state := range n.states { - addr := *n.Addr - if t := state.Ephemeral.Type; t != "" { - addr.Type = t + addr := n.Addr + if t := state.TypeName; t != "" { + addr.Resource.Resource.Type = t } // Determine if we need to suffix the name to de-dup @@ -117,36 +127,31 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { count, ok := nameCounter[key] if ok { count++ - addr.Name += fmt.Sprintf("-%d", count) + addr.Resource.Resource.Name += fmt.Sprintf("-%d", count) } nameCounter[key] = count // Add it to our list - addrs[i] = &addr + addrs[i] = addr } // Verify that all the addresses are clear - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - filter := &StateFilter{State: state} + state := ctx.State() for _, addr := range addrs { - result, err := filter.Filter(addr.String()) - if err != nil { - return nil, fmt.Errorf("Error verifying address %s: %s", addr, err) - } - - // Go through the filter results and it is an error if we find - // a matching InstanceState, meaning that we would have a collision. 
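The nameCounter logic shown earlier in DynamicExpand de-duplicates multiple imported objects that would otherwise claim the same address by suffixing -1, -2, and so on. A standalone sketch of that counter:

package main

import "fmt"

// dedupNames mirrors the counter idea: the first use of a name is kept
// as-is, later repeats get "-1", "-2", and so on appended.
func dedupNames(requested []string) []string {
	counter := map[string]int{}
	out := make([]string, 0, len(requested))
	for _, name := range requested {
		n, seen := counter[name]
		unique := name
		if seen {
			n++
			unique = fmt.Sprintf("%s-%d", name, n)
		}
		counter[name] = n
		out = append(out, unique)
	}
	return out
}

func main() {
	fmt.Println(dedupNames([]string{"aws_instance.web", "aws_instance.web", "aws_instance.db"}))
	// Prints: [aws_instance.web aws_instance.web-1 aws_instance.db]
}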
- for _, r := range result { - if _, ok := r.Value.(*InstanceState); ok { - return nil, fmt.Errorf( - "Can't import %s, would collide with an existing resource.\n\n"+ - "Please remove or rename this resource before continuing.", - addr) - } + existing := state.ResourceInstance(addr) + if existing != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource already managed by Terraform", + fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr), + )) + continue } } + if diags.HasErrors() { + // Bail out early, then. + return nil, diags.Err() + } // For each of the states, we add a node to handle the refresh/add to state. // "n.states" is populated by our own EvalTree with the result of @@ -154,10 +159,8 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { // is safe. for i, state := range n.states { g.Add(&graphNodeImportStateSub{ - Target: addrs[i], - Path_: n.Path(), + TargetAddr: addrs[i], State: state, - ProviderName: n.ProviderName, ResolvedProvider: n.ResolvedProvider, }) } @@ -169,79 +172,67 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { } // Done! - return g, nil + return g, diags.Err() } // graphNodeImportStateSub is the sub-node of graphNodeImportState // and is part of the subgraph. This node is responsible for refreshing // and adding a resource to the state once it is imported. type graphNodeImportStateSub struct { - Target *ResourceAddress - State *InstanceState - Path_ []string - ProviderName string - ResolvedProvider string + TargetAddr addrs.AbsResourceInstance + State providers.ImportedResource + ResolvedProvider addrs.AbsProviderConfig } +var ( + _ GraphNodeSubPath = (*graphNodeImportStateSub)(nil) + _ GraphNodeEvalable = (*graphNodeImportStateSub)(nil) +) + func (n *graphNodeImportStateSub) Name() string { - return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID) + return fmt.Sprintf("import %s result", n.TargetAddr) } -func (n *graphNodeImportStateSub) Path() []string { - return n.Path_ +func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance { + return n.TargetAddr.Module } // GraphNodeEvalable impl. 
func (n *graphNodeImportStateSub) EvalTree() EvalNode { // If the resource type isn't set, then it is an error - if n.State.Ephemeral.Type == "" { - err := fmt.Errorf( - "import of %s didn't set type for %s", - n.Target.String(), n.State.ID) + if n.State.TypeName == "" { + err := fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()) return &EvalReturnError{Error: &err} } - // DeepCopy so we're only modifying our local copy - state := n.State.DeepCopy() + state := n.State.AsInstanceObject() - // Build the resource info - info := &InstanceInfo{ - Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name), - ModulePath: n.Path_, - Type: n.State.Ephemeral.Type, - } - - // Key is the resource key - key := &ResourceStateKey{ - Name: n.Target.Name, - Type: info.Type, - Index: n.Target.Index, - } - - // The eval sequence - var provider ResourceProvider + var provider providers.Interface + var providerSchema *ProviderSchema return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ResolvedProvider, + Addr: n.ResolvedProvider, Output: &provider, + Schema: &providerSchema, }, &EvalRefresh{ - Provider: &provider, - State: &state, - Info: info, - Output: &state, + Addr: n.TargetAddr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, }, &EvalImportStateVerify{ - Info: info, - Id: n.State.ID, + Addr: n.TargetAddr.Resource, State: &state, }, &EvalWriteState{ - Name: key.String(), - ResourceType: info.Type, - Provider: n.ResolvedProvider, - State: &state, + Addr: n.TargetAddr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, }, }, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go index 95ecfc0a44..84eb26b260 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go @@ -1,37 +1,45 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/configs" ) // LocalTransformer is a GraphTransformer that adds all the local values // from the configuration to the graph. type LocalTransformer struct { - Module *module.Tree + Config *configs.Config } func (t *LocalTransformer) Transform(g *Graph) error { - return t.transformModule(g, t.Module) + return t.transformModule(g, t.Config) } -func (t *LocalTransformer) transformModule(g *Graph, m *module.Tree) error { - if m == nil { +func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error { + if c == nil { // Can't have any locals if there's no config return nil } - for _, local := range m.Config().Locals { + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules.
+ path := c.Path.UnkeyedInstanceShim() + + for _, local := range c.Module.Locals { + addr := path.LocalValue(local.Name) node := &NodeLocal{ - PathValue: normalizeModulePath(m.Path()), - Config: local, + Addr: addr, + Config: local, } - g.Add(node) } // Also populate locals for child modules - for _, c := range m.Children() { - if err := t.transformModule(g, c); err != nil { + for _, cc := range c.Children { + if err := t.transformModule(g, cc); err != nil { return err } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go index 467950bdc6..a994bd4faf 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go @@ -1,46 +1,54 @@ package terraform import ( - "log" + "fmt" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs" ) // ModuleVariableTransformer is a GraphTransformer that adds all the variables // in the configuration to the graph. // -// This only adds variables that are referenced by other things in the graph. -// If a module variable is not referenced, it won't be added to the graph. +// Any "variable" block present in any non-root module is included here, even +// if a particular variable is not referenced from anywhere. +// +// The transform will produce errors if a call to a module does not conform +// to the expected set of arguments, but this transformer is not in a good +// position to return errors and so the validate walk should include specific +// steps for validating module blocks, separate from this transform. type ModuleVariableTransformer struct { - Module *module.Tree - - DisablePrune bool // True if pruning unreferenced should be disabled + Config *configs.Config } func (t *ModuleVariableTransformer) Transform(g *Graph) error { - return t.transform(g, nil, t.Module) + return t.transform(g, nil, t.Config) } -func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error { - // If no config, no variables - if m == nil { +func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error { + // We can have no variables if we have no configuration. + if c == nil { return nil } - // Transform all the children. This must be done BEFORE the transform - // above since child module variables can reference parent module variables. - for _, c := range m.Children() { - if err := t.transform(g, m, c); err != nil { + // Transform all the children first. + for _, cc := range c.Children { + if err := t.transform(g, c, cc); err != nil { return err } } + // If we're processing anything other than the root module then we'll + // add graph nodes for variables defined inside. (Variables for the root + // module are dealt with in RootVariableTransformer). // If we have a parent, this is a non-root module, so we add its // variable nodes here.
if parent != nil { - if err := t.transformSingle(g, parent, m); err != nil { + if err := t.transformSingle(g, parent, c); err != nil { return err } } @@ -48,71 +56,69 @@ func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) return nil } -func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error { - // If we have no vars, we're done! - vars := m.Config().Variables - if len(vars) == 0 { - log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path()) - return nil +func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error { + + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. + path := c.Path.UnkeyedInstanceShim() + _, call := path.Call() + + // Find the call in the parent module configuration, so we can get the + // expressions given for each input variable at the call site. + callConfig, exists := parent.Module.ModuleCalls[call.Name] + if !exists { + // This should never happen, since it indicates an improperly-constructed + // configuration tree. + panic(fmt.Errorf("no module call block found for %s", path)) } - // Look for usage of this module - var mod *config.Module - for _, modUse := range parent.Config().Modules { - if modUse.Name == m.Name() { - mod = modUse - break - } + // We need to construct a schema for the expected call arguments based on + // the configured variables in our config, which we can then use to + // decode the content of the call block. + schema := &hcl.BodySchema{} + for _, v := range c.Module.Variables { + schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{ + Name: v.Name, + Required: v.Default == cty.NilVal, + }) } - if mod == nil { - log.Printf("[INFO] Module %#v not used, not adding variables", m.Path()) - return nil + + content, contentDiags := callConfig.Config.Content(schema) + if contentDiags.HasErrors() { + // Validation code elsewhere should deal with any errors before we + // get in here, but we'll report them out here just in case, to + // avoid crashes. + var diags tfdiags.Diagnostics + diags = diags.Append(contentDiags) + return diags.Err() } - // Build the reference map so we can determine if we're referencing things. - refMap := NewReferenceMap(g.Vertices()) - - // Add all variables here - for _, v := range vars { - // Determine the value of the variable. If it isn't in the - // configuration then it was never set and that's not a problem. - var value *config.RawConfig - if raw, ok := mod.RawConfig.Raw[v.Name]; ok { - var err error - value, err = config.NewRawConfig(map[string]interface{}{ - v.Name: raw, - }) - if err != nil { - // This shouldn't happen because it is already in - // a RawConfig above meaning it worked once before. - panic(err) + for _, v := range c.Module.Variables { + var expr hcl.Expression + if attr := content.Attributes[v.Name]; attr != nil { + expr = attr.Expr + } else { + // No expression provided for this variable, so we'll make a + // synthetic one using the variable's default value. + expr = &hclsyntax.LiteralValueExpr{ + Val: v.Default, + SrcRange: v.DeclRange, // This is not exact, but close enough } } - // Build the node. 
- // - // NOTE: For now this is just an "applyable" variable. As we build - // new graph builders for the other operations I suspect we'll - // find a way to parameterize this, require new transforms, etc. + // For now we treat all module variables as "applyable", even though + // such nodes are valid to use on other walks too. We may specialize + // this in future if we find reasons to employ different behaviors + // in different scenarios. node := &NodeApplyableModuleVariable{ - PathValue: normalizeModulePath(m.Path()), - Config: v, - Value: value, - Module: t.Module, + Addr: path.InputVariable(v.Name), + Config: v, + Expr: expr, } - - if !t.DisablePrune { - // If the node is not referenced by anything, then we don't need - // to include it since it won't be used. - if matches := refMap.ReferencedBy(node); len(matches) == 0 { - log.Printf( - "[INFO] Not including %q in graph, nothing depends on it", - dag.VertexName(node)) - continue - } - } - - // Add it! g.Add(node) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go index b256a25b7b..eec762e55a 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go @@ -3,7 +3,9 @@ package terraform import ( "log" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" ) // OrphanResourceCountTransformer is a GraphTransformer that adds orphans @@ -14,95 +16,106 @@ import ( // This transform assumes that if an element in the state is within the count // bounds given, that it is not an orphan. type OrphanResourceCountTransformer struct { - Concrete ConcreteResourceNodeFunc + Concrete ConcreteResourceInstanceNodeFunc - Count int // Actual count of the resource - Addr *ResourceAddress // Addr of the resource to look for orphans - State *State // Full global state + Count int // Actual count of the resource, or -1 if count is not set at all + Addr addrs.AbsResource // Addr of the resource to look for orphans + State *states.State // Full global state } func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { - log.Printf("[TRACE] OrphanResourceCount: Starting...") + rs := t.State.Resource(t.Addr) + if rs == nil { + return nil // Resource doesn't exist in state, so nothing to do! + } - // Grab the module in the state just for this resource address - ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path)) - if ms == nil { - // If no state, there can't be orphans - return nil + haveKeys := make(map[addrs.InstanceKey]struct{}) + for key := range rs.Instances { + haveKeys[key] = struct{}{} } - orphanIndex := -1 - if t.Count == 1 { - orphanIndex = 0 + if t.Count < 0 { + return t.transformNoCount(haveKeys, g) + } + if t.Count == 0 { + return t.transformZeroCount(haveKeys, g) } + return t.transformCount(haveKeys, g) +} + +func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // Due to the logic in Transform, we only get in here if our count is + // at least one. 
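The dispatch above splits orphan detection into three cases. For the non-zero-count case that transformCount handles below, the rule is: an integer key survives if it is under the configured count, a bare no-key instance may alias index 0 when no explicit 0-key exists, and everything else is orphaned. A small sketch under those rules, with int keys standing in for addrs.InstanceKey:

package main

import "fmt"

// countOrphans models transformCount: instance keys are ints, with -1
// standing in for addrs.NoKey (an instance with no index at all).
func countOrphans(haveKeys map[int]bool, count int) []int {
	var orphans []int
	for key := range haveKeys {
		if key == -1 && !haveKeys[0] {
			continue // no 0-key exists, so a bare instance aliases index 0
		}
		if key >= 0 && key < count {
			continue // within the configured count: not an orphan
		}
		orphans = append(orphans, key)
	}
	return orphans
}

func main() {
	// Instances 0, 1 and 2 exist in state but count is now 2: index 2 is orphaned.
	fmt.Println(countOrphans(map[int]bool{0: true, 1: true, 2: true}, 2))
}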
- // Go through the orphans and add them all to the state - for key, _ := range ms.Resources { - // Build the address - addr, err := parseResourceAddressInternal(key) - if err != nil { - return err + _, have0Key := haveKeys[addrs.IntKey(0)] + + for key := range haveKeys { + if key == addrs.NoKey && !have0Key { + // If we have no 0-key then we will accept a no-key instance + // as an alias for it. + continue } - addr.Path = ms.Path[1:] - // Copy the address for comparison. If we aren't looking at - // the same resource, then just ignore it. - addrCopy := addr.Copy() - addrCopy.Index = -1 - if !addrCopy.Equals(t.Addr) { + i, isInt := key.(addrs.IntKey) + if isInt && int(i) < t.Count { continue } - log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr) + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) + g.Add(node) + } + + return nil +} - idx := addr.Index +func (t *OrphanResourceCountTransformer) transformZeroCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // This case is easy: we need to orphan any keys we have at all. - // If we have zero and the index here is 0 or 1, then we - // change the index to a high number so that we treat it as - // an orphan. - if t.Count <= 0 && idx <= 0 { - idx = t.Count + 1 + for key := range haveKeys { + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) } + log.Printf("[TRACE] OrphanResourceCount(zero): adding %s as %T", t.Addr, node) + g.Add(node) + } - // If we have a count greater than 0 and we're at the zero index, - // we do a special case check to see if our state also has a - // -1 index value. If so, this is an orphan because our rules are - // that if both a -1 and 0 are in the state, the 0 is destroyed. - if t.Count > 0 && idx == orphanIndex { - // This is a piece of cleverness (beware), but its simple: - // if orphanIndex is 0, then check -1, else check 0. - checkIndex := (orphanIndex + 1) * -1 - - key := &ResourceStateKey{ - Name: addr.Name, - Type: addr.Type, - Mode: addr.Mode, - Index: checkIndex, - } - - if _, ok := ms.Resources[key.String()]; ok { - // We have a -1 index, too. Make an arbitrarily high - // index so that we always mark this as an orphan. - log.Printf( - "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d", - addr, orphanIndex) - idx = t.Count + 1 - } - } + return nil +} - // If the index is within the count bounds, it is not an orphan - if idx < t.Count { +func (t *OrphanResourceCountTransformer) transformNoCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // Negative count indicates that count is not set at all, in which + // case we expect to have a single instance with no key set at all. + // However, we'll also accept an instance with key 0 set as an alias + // for it, in case the user has just deleted the "count" argument and + // so wants to keep the first instance in the set. + + _, haveNoKey := haveKeys[addrs.NoKey] + _, have0Key := haveKeys[addrs.IntKey(0)] + keepKey := addrs.NoKey + if have0Key && !haveNoKey { + // If we don't have a no-key instance then we can use the 0-key instance + // instead. 
+ keepKey = addrs.IntKey(0) + } + + for key := range haveKeys { + if key == keepKey { continue } - // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{Addr: addr} + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) var node dag.Vertex = abstract if f := t.Concrete; f != nil { node = f(abstract) } - - // Add it to the graph + log.Printf("[TRACE] OrphanResourceCount(no-count): adding %s as %T", t.Addr, node) g.Add(node) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go index aea2bd0ed7..c675409346 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go @@ -3,16 +3,17 @@ package terraform import ( "log" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" ) // OrphanOutputTransformer finds the outputs that aren't present // in the given config that are in the state and adds them to the graph // for deletion. type OrphanOutputTransformer struct { - Module *module.Tree // Root module - State *State // State is the root state + Config *configs.Config // Root of config tree + State *states.State // State is the root state } func (t *OrphanOutputTransformer) Transform(g *Graph) error { @@ -29,24 +30,30 @@ func (t *OrphanOutputTransformer) Transform(g *Graph) error { return nil } -func (t *OrphanOutputTransformer) transform(g *Graph, ms *ModuleState) error { +func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error { if ms == nil { return nil } - path := normalizeModulePath(ms.Path) + moduleAddr := ms.Addr // Get the config for this path, which is nil if the entire module has been // removed. - var c *config.Config - if m := t.Module.Child(path[1:]); m != nil { - c = m.Config() + var outputs map[string]*configs.Output + if c := t.Config.DescendentForInstance(moduleAddr); c != nil { + outputs = c.Module.Outputs } - // add all the orphaned outputs to the graph - for _, n := range ms.RemovedOutputs(c) { - g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path}) + // An output is "orphaned" if it's present in the state but not declared + // in the configuration. + for name := range ms.OutputValues { + if _, exists := outputs[name]; exists { + continue + } + g.Add(&NodeOutputOrphan{ + Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr), + }) } return nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go index e42d3c8495..50df1781ec 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go @@ -1,34 +1,43 @@ package terraform import ( - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "log" + + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" ) -// OrphanResourceTransformer is a GraphTransformer that adds resource -// orphans to the graph. A resource orphan is a resource that is -// represented in the state but not in the configuration. 
-//
-// This only adds orphans that have no representation at all in the
+// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned
+// resource instances to the graph. An "orphan" is an instance that is present
+// in the state but belongs to a resource that is no longer present in the
 // configuration.
-type OrphanResourceTransformer struct {
-	Concrete ConcreteResourceNodeFunc
+//
+// This is not the transformer that deals with "count orphans" (instances that
+// are no longer covered by a resource's "count" or "for_each" setting); that's
+// handled instead by OrphanResourceCountTransformer.
+type OrphanResourceInstanceTransformer struct {
+	Concrete ConcreteResourceInstanceNodeFunc
 
 	// State is the global state. We require the global state to
 	// properly find module orphans at our path.
-	State *State
+	State *states.State
 
-	// Module is the root module. We'll look up the proper configuration
-	// using the graph path.
-	Module *module.Tree
+	// Config is the root node in the configuration tree. We'll look up
+	// the appropriate node in this tree using the path in each node.
+	Config *configs.Config
 }
 
-func (t *OrphanResourceTransformer) Transform(g *Graph) error {
+func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error {
 	if t.State == nil {
 		// If the entire state is nil, there can't be any orphans
 		return nil
 	}
+	if t.Config == nil {
+		// Should never happen: we can't be doing any Terraform operations
+		// without at least an empty configuration.
+		panic("OrphanResourceInstanceTransformer used without setting Config")
+	}
 
 	// Go through the modules and for each module transform in order
 	// to add the orphan.
@@ -41,38 +50,130 @@ func (t *OrphanResourceTransformer) Transform(g *Graph) error {
 	return nil
 }
 
-func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error {
+func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error {
 	if ms == nil {
 		return nil
 	}
 
-	// Get the configuration for this path. The configuration might be
+	moduleAddr := ms.Addr
+
+	// Get the configuration for this module. The configuration might be
 	// nil if the module was removed from the configuration. This is okay,
 	// this just means that every resource is an orphan.
-	var c *config.Config
-	if m := t.Module.Child(ms.Path[1:]); m != nil {
-		c = m.Config()
+	var m *configs.Module
+	if c := t.Config.DescendentForInstance(moduleAddr); c != nil {
+		m = c.Module
 	}
 
-	// Go through the orphans and add them all to the state
-	for _, key := range ms.Orphans(c) {
-		// Build the abstract resource
-		addr, err := parseResourceAddressInternal(key)
-		if err != nil {
-			return err
+	// An "orphan" is a resource that is in the state but not the configuration,
+	// so we'll walk the state resources and try to correlate each of them
+	// with a configuration block. Each orphan gets a node in the graph whose
+	// type is decided by t.Concrete.
+	//
+	// We don't handle orphans related to changes in the "count" and "for_each"
+	// pseudo-arguments here. They are handled by OrphanResourceCountTransformer.
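Before the loop that follows, the state-to-config correlation can be seen in miniature. This standalone sketch replaces states.Module and configs.Module with plain maps; the resource addresses are invented for illustration.

    package main

    import "fmt"

    func main() {
        // Instance keys held in state, per resource address.
        stateResources := map[string][]string{
            "aws_instance.a": {"0", "1"},
            "aws_instance.b": {"0"}, // not in config below, so orphaned
        }
        // Resource addresses still present in the configuration.
        configResources := map[string]bool{
            "aws_instance.a": true,
        }

        for addr, keys := range stateResources {
            if configResources[addr] {
                continue // still configured, not an orphan
            }
            for _, key := range keys {
                fmt.Printf("orphan node for %s[%s]\n", addr, key)
            }
        }
    }
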
+ for _, rs := range ms.Resources { + if m != nil { + if r := m.ResourceByAddr(rs.Addr); r != nil { + continue + } } - addr.Path = ms.Path[1:] - // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) + for key := range rs.Instances { + addr := rs.Addr.Instance(key).Absolute(moduleAddr) + abstract := NewNodeAbstractResourceInstance(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr) + g.Add(node) } + } + + return nil +} + +// OrphanResourceTransformer is a GraphTransformer that adds orphaned +// resources to the graph. An "orphan" is a resource that is present in +// the state but no longer present in the config. +// +// This is separate to OrphanResourceInstanceTransformer in that it deals with +// whole resources, rather than individual instances of resources. Orphan +// resource nodes are only used during apply to clean up leftover empty +// resource state skeletons, after all of the instances inside have been +// removed. +// +// This transformer will also create edges in the graph to any pre-existing +// node that creates or destroys the entire orphaned resource or any of its +// instances, to ensure that the "orphan-ness" of a resource is always dealt +// with after all other aspects of it. +type OrphanResourceTransformer struct { + Concrete ConcreteResourceNodeFunc + + // State is the global state. + State *states.State - // Add it to the graph - g.Add(node) + // Config is the root node in the configuration tree. + Config *configs.Config +} + +func (t *OrphanResourceTransformer) Transform(g *Graph) error { + if t.State == nil { + // If the entire state is nil, there can't be any orphans + return nil + } + if t.Config == nil { + // Should never happen: we can't be doing any Terraform operations + // without at least an empty configuration. + panic("OrphanResourceTransformer used without setting Config") + } + + // We'll first collect up the existing nodes for each resource so we can + // create dependency edges for any new nodes we create. + deps := map[string][]dag.Vertex{} + for _, v := range g.Vertices() { + switch tv := v.(type) { + case GraphNodeResourceInstance: + k := tv.ResourceInstanceAddr().ContainingResource().String() + deps[k] = append(deps[k], v) + case GraphNodeResource: + k := tv.ResourceAddr().String() + deps[k] = append(deps[k], v) + case GraphNodeDestroyer: + k := tv.DestroyAddr().ContainingResource().String() + deps[k] = append(deps[k], v) + } + } + + for _, ms := range t.State.Modules { + moduleAddr := ms.Addr + + mc := t.Config.DescendentForInstance(moduleAddr) // might be nil if whole module has been removed + + for _, rs := range ms.Resources { + if mc != nil { + if r := mc.Module.ResourceByAddr(rs.Addr); r != nil { + // It's in the config, so nothing to do for this one. 
+ continue + } + } + + addr := rs.Addr.Absolute(moduleAddr) + abstract := NewNodeAbstractResource(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceTransformer: adding whole-resource orphan node for %s", addr) + g.Add(node) + for _, dn := range deps[addr.String()] { + log.Printf("[TRACE] OrphanResourceTransformer: node %q depends on %q", dag.VertexName(node), dag.VertexName(dn)) + g.Connect(dag.BasicEdge(node, dn)) + } + } } return nil + } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go index faa25e412c..ed93cdb873 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go @@ -3,7 +3,7 @@ package terraform import ( "log" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" ) @@ -14,42 +14,42 @@ import ( // aren't changing since there is no downside: the state will be available // even if the dependent items aren't changing. type OutputTransformer struct { - Module *module.Tree + Config *configs.Config } func (t *OutputTransformer) Transform(g *Graph) error { - return t.transform(g, t.Module) + return t.transform(g, t.Config) } -func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error { - // If no config, no outputs - if m == nil { +func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error { + // If we have no config then there can be no outputs. + if c == nil { return nil } // Transform all the children. We must do this first because // we can reference module outputs and they must show up in the // reference map. - for _, c := range m.Children() { - if err := t.transform(g, c); err != nil { + for _, cc := range c.Children { + if err := t.transform(g, cc); err != nil { return err } } - // If we have no outputs, we're done! - os := m.Config().Outputs - if len(os) == 0 { - return nil - } + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. + path := c.Path.UnkeyedInstanceShim() - // Add all outputs here - for _, o := range os { + for _, o := range c.Module.Outputs { + addr := path.OutputValue(o.Name) node := &NodeApplyableOutput{ - PathValue: normalizeModulePath(m.Path()), - Config: o, + Addr: addr, + Config: o, } - - // Add it! 
g.Add(node) } @@ -71,8 +71,8 @@ func (t *DestroyOutputTransformer) Transform(g *Graph) error { // create the destroy node for this output node := &NodeDestroyableOutput{ - PathValue: output.PathValue, - Config: output.Config, + Addr: output.Addr, + Config: output.Config, } log.Printf("[TRACE] creating %s", node.Name()) diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go index c4772b4019..6a4fb47c49 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go @@ -1,22 +1,21 @@ package terraform import ( - "errors" "fmt" "log" - "strings" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) -func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer { +func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer { return GraphTransformMulti( // Add providers from the config &ProviderConfigTransformer{ - Module: mod, + Config: config, Providers: providers, Concrete: concrete, }, @@ -26,7 +25,9 @@ func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, m Concrete: concrete, }, // Connect the providers - &ProviderTransformer{}, + &ProviderTransformer{ + Config: config, + }, // Remove unused providers and proxies &PruneProviderTransformer{}, // Connect provider to their parent provider nodes @@ -36,10 +37,14 @@ func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, m // GraphNodeProvider is an interface that nodes that can be a provider // must implement. -// ProviderName returns the name of the provider this satisfies. +// +// ProviderAddr returns the address of the provider configuration this +// satisfies, which is relative to the path returned by method Path(). +// // Name returns the full name of the provider in the config. type GraphNodeProvider interface { - ProviderName() string + GraphNodeSubPath + ProviderAddr() addrs.AbsProviderConfig Name() string } @@ -47,62 +52,132 @@ type GraphNodeProvider interface { // provider must implement. The CloseProviderName returned is the name of // the provider they satisfy. type GraphNodeCloseProvider interface { - CloseProviderName() string + GraphNodeSubPath + CloseProviderAddr() addrs.AbsProviderConfig } // GraphNodeProviderConsumer is an interface that nodes that require -// a provider must implement. ProvidedBy must return the name of the provider -// to use. This may be a provider by type, type.alias or a fully resolved -// provider name +// a provider must implement. ProvidedBy must return the address of the provider +// to use, which will be resolved to a configuration either in the same module +// or in an ancestor module, with the resulting absolute address passed to +// SetProvider. type GraphNodeProviderConsumer interface { - ProvidedBy() string + // ProvidedBy returns the address of the provider configuration the node + // refers to. If the returned "exact" value is true, this address will + // be taken exactly. If "exact" is false, a provider configuration from + // an ancestor module may be selected instead. 
+	ProvidedBy() (addr addrs.AbsProviderConfig, exact bool)
 
 	// Set the resolved provider address for this resource.
-	SetProvider(string)
+	SetProvider(addrs.AbsProviderConfig)
 }
 
 // ProviderTransformer is a GraphTransformer that maps resources to
 // providers within the graph. This will error if there are any resources
 // that don't map to proper resources.
-type ProviderTransformer struct{}
+type ProviderTransformer struct {
+	Config *configs.Config
+}
 
 func (t *ProviderTransformer) Transform(g *Graph) error {
-	// Go through the other nodes and match them to providers they need
-	var err error
-	m := providerVertexMap(g)
+	// We need to find a provider configuration address for each resource
+	// either directly represented by a node or referenced by a node in
+	// the graph, and then create graph edges from provider to provider user
+	// so that the providers will get initialized first.
+
+	var diags tfdiags.Diagnostics
+
+	// To start, we'll collect the _requested_ provider addresses for each
+	// node, which we'll then resolve (handling provider inheritance, etc) in
+	// the next step.
+	// Our "requested" map is from graph vertices to string representations of
+	// provider config addresses (for deduping) to requests.
+	type ProviderRequest struct {
+		Addr  addrs.AbsProviderConfig
+		Exact bool // If true, inheritance from parent modules is not attempted
+	}
+	requested := map[dag.Vertex]map[string]ProviderRequest{}
+	needConfigured := map[string]addrs.AbsProviderConfig{}
 	for _, v := range g.Vertices() {
+
+		// Does the vertex _directly_ use a provider?
 		if pv, ok := v.(GraphNodeProviderConsumer); ok {
-			p := pv.ProvidedBy()
+			requested[v] = make(map[string]ProviderRequest)
 
-			key := providerMapKey(p, pv)
+			p, exact := pv.ProvidedBy()
+			if exact {
+				log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), p)
+			} else {
+				log.Printf("[TRACE] ProviderTransformer: %s is provided by %s or inherited equivalent", dag.VertexName(v), p)
+			}
+
+			requested[v][p.String()] = ProviderRequest{
+				Addr:  p,
+				Exact: exact,
+			}
+
+			// Direct references need the provider configured as well as initialized
+			needConfigured[p.String()] = p
+		}
+	}
+
+	// Now we'll go through all the requested addresses we just collected and
+	// figure out which _actual_ config address each belongs to, after resolving
+	// for provider inheritance and passing.
+	m := providerVertexMap(g)
+	for v, reqs := range requested {
+		for key, req := range reqs {
+			p := req.Addr
 			target := m[key]
 
-			sp, ok := pv.(GraphNodeSubPath)
+			_, ok := v.(GraphNodeSubPath)
 			if !ok && target == nil {
-				// no target, and no path to walk up
-				err = multierror.Append(err, fmt.Errorf(
-					"%s: provider %s couldn't be found",
-					dag.VertexName(v), p))
-				break
+				// No target and no path to traverse up from
+				diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p))
+				continue
+			}
+
+			if target != nil {
+				log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v))
 			}
 
-			// if we don't have a provider at this level, walk up the path looking for one
-			for i := 1; target == nil; i++ {
-				path := normalizeModulePath(sp.Path())
-				if len(path) < i {
-					break
+			// if we don't have a provider at this level, walk up the path looking for one,
+			// unless we were told to be exact.
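The walk-up behavior described in the comment above, and implemented in the loop that follows, amounts to repeatedly shortening the module path until a configured provider is found. A rough standalone model, with string keys standing in for addrs.AbsProviderConfig (the keying scheme here is invented for illustration):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Provider configurations present in the graph, keyed by
        // "<module path>|<provider name>"; "" is the root module.
        configured := map[string]bool{
            "|aws": true,
        }

        modulePath := []string{"child", "grandchild"} // where the resource lives
        provider := "aws"
        exact := false // an exact request would disable inheritance

        path := modulePath
        for {
            key := strings.Join(path, ".") + "|" + provider
            if configured[key] {
                fmt.Println("found provider at", key)
                break
            }
            if exact || len(path) == 0 {
                fmt.Println("no provider configuration found")
                break
            }
            path = path[:len(path)-1] // inherit from the parent module
        }
    }
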
+ if target == nil && !req.Exact { + for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() { + key := pp.String() + target = m[key] + if target != nil { + log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp) + break + } + log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v)) } + } - key = ResolveProviderName(p, path[:len(path)-i]) - target = m[key] - if target != nil { - break + // If this provider doesn't need to be configured then we can just + // stub it out with an init-only provider node, which will just + // start up the provider and fetch its schema. + if _, exists := needConfigured[key]; target == nil && !exists { + stubAddr := p.ProviderConfig.Absolute(addrs.RootModuleInstance) + stub := &NodeEvalableProvider{ + &NodeAbstractProvider{ + Addr: stubAddr, + }, } + m[stubAddr.String()] = stub + log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr) + target = stub + g.Add(target) } if target == nil { - err = multierror.Append(err, fmt.Errorf( - "%s: configuration for %s is not present; a provider configuration block is required for all operations", - dag.VertexName(v), p, + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider configuration not present", + fmt.Sprintf( + "To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.", + dag.VertexName(v), p, dag.VertexName(v), + ), )) break } @@ -111,16 +186,18 @@ func (t *ProviderTransformer) Transform(g *Graph) error { if p, ok := target.(*graphNodeProxyProvider); ok { g.Remove(p) target = p.Target() - key = target.(GraphNodeProvider).Name() + key = target.(GraphNodeProvider).ProviderAddr().String() } - log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key) - pv.SetProvider(key) + log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target)) + if pv, ok := v.(GraphNodeProviderConsumer); ok { + pv.SetProvider(target.ProviderAddr()) + } g.Connect(dag.BasicEdge(v, target)) } } - return err + return diags.Err() } // CloseProviderTransformer is a GraphTransformer that adds nodes to the @@ -136,15 +213,16 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error { for _, v := range pm { p := v.(GraphNodeProvider) + key := p.ProviderAddr().String() // get the close provider of this type if we alread created it - closer := cpm[p.Name()] + closer := cpm[key] if closer == nil { // create a closer for this provider type - closer = &graphNodeCloseProvider{ProviderNameValue: p.Name()} + closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()} g.Add(closer) - cpm[p.Name()] = closer + cpm[key] = closer } // Close node depends on the provider itself @@ -164,10 +242,20 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error { return err } -// MissingProviderTransformer is a GraphTransformer that adds nodes for all -// required providers into the graph. Specifically, it creates provider -// configuration nodes for all the providers that we support. These are pruned -// later during an optimization pass. 
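The implicit default configurations described by the replacement doc comment just below behave roughly like this standalone sketch, where provider addresses are simplified to strings and the request data is invented:

    package main

    import "fmt"

    func main() {
        // Explicit provider configurations already in the root module.
        existing := map[string]bool{"provider.aws": true}

        requests := []struct {
            Type  string
            Alias string
        }{
            {"aws", ""}, {"google", ""}, {"aws", "west"},
        }

        for _, req := range requests {
            if req.Alias != "" {
                continue // no implicit defaults for aliased configurations
            }
            key := "provider." + req.Type
            if existing[key] {
                continue // an explicit root configuration already exists
            }
            existing[key] = true
            fmt.Println("added implicit root configuration", key) // here: provider.google
        }
    }
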
+// MissingProviderTransformer is a GraphTransformer that adds to the graph
+// a node for each default provider configuration that is referenced by another
+// node but not already present in the graph.
+//
+// These "default" nodes are always added to the root module, regardless of
+// where they are requested. This is important because our inheritance
+// resolution behavior in ProviderTransformer will then treat these as a
+// last-ditch fallback after walking up the tree, rather than preferring them
+// as it would if they were placed in the same module as the requester.
+//
+// This transformer may create extra nodes that are not needed in practice,
+// due to overriding provider configurations in child modules.
+// PruneProviderTransformer can then remove these once ProviderTransformer
+// has resolved all of the inheritance, etc.
 type MissingProviderTransformer struct {
 	// Providers is the list of providers we support.
 	Providers []string
@@ -192,34 +280,40 @@ func (t *MissingProviderTransformer) Transform(g *Graph) error {
 			continue
 		}
 
-		p := pv.ProvidedBy()
-		// this may be the resolved provider from the state, so we need to get
-		// the base provider name.
-		parts := strings.SplitAfter(p, "provider.")
-		p = parts[len(parts)-1]
+		// For our work here we actually care only about the provider type and
+		// we plan to place all default providers in the root module, and so
+		// it's safe for us to rely on ProvidedBy here rather than waiting for
+		// the later proper resolution of provider inheritance done by
+		// ProviderTransformer.
+		p, _ := pv.ProvidedBy()
+		if p.ProviderConfig.Alias != "" {
+			// We do not create default aliased configurations.
+			log.Println("[TRACE] MissingProviderTransformer: skipping implication of aliased config", p)
+			continue
+		}
 
-		key := ResolveProviderName(p, nil)
+		// We're going to create an implicit _default_ configuration for the
+		// referenced provider type in the _root_ module, ignoring all other
+		// aspects of the resource's declared provider address.
+		defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(p.ProviderConfig.Type)
+		key := defaultAddr.String()
 		provider := m[key]
 
-		// we already have it
 		if provider != nil {
+			// There's already an explicit default configuration for this
+			// provider type in the root module, so we have nothing to do.
 			continue
 		}
 
-		// we don't implicitly create aliased providers
-		if strings.Contains(p, ".") {
-			log.Println("[DEBUG] not adding missing provider alias:", p)
-			continue
-		}
-
-		log.Println("[DEBUG] adding missing provider:", p)
+		log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v))
 
-		// create the misisng top-level provider
+		// create the missing top-level provider
 		provider = t.Concrete(&NodeAbstractProvider{
-			NameValue: p,
-		}).(dag.Vertex)
+			Addr: defaultAddr,
+		}).(GraphNodeProvider)
 
-		m[key] = g.Add(provider)
+		g.Add(provider)
+		m[key] = provider
 	}
 
 	return err
@@ -237,26 +331,26 @@ func (t *ParentProviderTransformer) Transform(g *Graph) error {
 	for _, v := range g.Vertices() {
 		// Only care about providers
 		pn, ok := v.(GraphNodeProvider)
-		if !ok || pn.ProviderName() == "" {
+		if !ok {
 			continue
 		}
 
-		// Also require a subpath, if there is no subpath then we
-		// can't have a parent.
-		if pn, ok := v.(GraphNodeSubPath); ok {
-			if len(normalizeModulePath(pn.Path())) <= 1 {
-				continue
-			}
+		// Also require non-empty path, since otherwise we're in the root
+		// module and so cannot have a parent.
+ if len(pn.Path()) <= 1 { + continue } // this provider may be disabled, but we can only get it's name from // the ProviderName string - name := ResolveProviderName(strings.SplitN(pn.ProviderName(), " ", 2)[0], nil) - parent := pm[name] - if parent != nil { - g.Connect(dag.BasicEdge(v, parent)) + addr := pn.ProviderAddr() + parentAddr, ok := addr.Inherited() + if ok { + parent := pm[parentAddr.String()] + if parent != nil { + g.Connect(dag.BasicEdge(v, parent)) + } } - } return nil } @@ -270,20 +364,20 @@ type PruneProviderTransformer struct{} func (t *PruneProviderTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { // We only care about providers - pn, ok := v.(GraphNodeProvider) - if !ok || pn.ProviderName() == "" { + _, ok := v.(GraphNodeProvider) + if !ok { continue } // ProxyProviders will have up edges, but we're now done with them in the graph if _, ok := v.(*graphNodeProxyProvider); ok { - log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v)) + log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v)) g.Remove(v) } // Remove providers with no dependencies. if g.UpEdges(v).Len() == 0 { - log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v)) + log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v)) g.Remove(v) } } @@ -291,40 +385,24 @@ func (t *PruneProviderTransformer) Transform(g *Graph) error { return nil } -// providerMapKey is a helper that gives us the key to use for the -// maps returned by things such as providerVertexMap. -func providerMapKey(k string, v dag.Vertex) string { - if strings.Contains(k, "provider.") { - // this is already resolved - return k - } - - // we create a dummy provider to - var path []string - if sp, ok := v.(GraphNodeSubPath); ok { - path = normalizeModulePath(sp.Path()) - } - return ResolveProviderName(k, path) -} - -func providerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) +func providerVertexMap(g *Graph) map[string]GraphNodeProvider { + m := make(map[string]GraphNodeProvider) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProvider); ok { - // TODO: The Name may have meta info, like " (disabled)" - name := strings.SplitN(pv.Name(), " ", 2)[0] - m[name] = v + addr := pv.ProviderAddr() + m[addr.String()] = pv } } return m } -func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) +func closeProviderVertexMap(g *Graph) map[string]GraphNodeCloseProvider { + m := make(map[string]GraphNodeCloseProvider) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeCloseProvider); ok { - m[pv.CloseProviderName()] = v + addr := pv.CloseProviderAddr() + m[addr.String()] = pv } } @@ -332,16 +410,25 @@ func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { } type graphNodeCloseProvider struct { - ProviderNameValue string + Addr addrs.AbsProviderConfig } +var ( + _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil) +) + func (n *graphNodeCloseProvider) Name() string { - return n.ProviderNameValue + " (close)" + return n.Addr.String() + " (close)" +} + +// GraphNodeSubPath impl. +func (n *graphNodeCloseProvider) Path() addrs.ModuleInstance { + return n.Addr.Module } // GraphNodeEvalable impl. func (n *graphNodeCloseProvider) EvalTree() EvalNode { - return CloseProviderEvalTree(n.ProviderNameValue) + return CloseProviderEvalTree(n.Addr) } // GraphNodeDependable impl. 
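As context for the graphNodeCloseProvider changes here: CloseProviderTransformer, earlier in this file, creates one close node per distinct provider address and reuses it for every consumer. A minimal stand-in model of that bookkeeping (the types below are not the real graph types):

    package main

    import "fmt"

    type closer struct{ addr string }

    func main() {
        // Provider addresses encountered while walking the graph; duplicates
        // must map to a single close node.
        providers := []string{"provider.aws", "provider.aws", "provider.google"}

        closers := map[string]*closer{}
        for _, addr := range providers {
            if _, ok := closers[addr]; !ok {
                closers[addr] = &closer{addr: addr}
                fmt.Println("created close node for", addr)
            }
            // The real transformer also connects the closer to the provider
            // and to every resource that uses it.
        }
    }
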
@@ -349,8 +436,8 @@ func (n *graphNodeCloseProvider) DependableName() []string { return []string{n.Name()} } -func (n *graphNodeCloseProvider) CloseProviderName() string { - return n.ProviderNameValue +func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig { + return n.Addr } // GraphNodeDotter impl. @@ -380,17 +467,24 @@ func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { // configurations, and are removed after all the resources have been connected // to their providers. type graphNodeProxyProvider struct { - nameValue string - path []string - target GraphNodeProvider + addr addrs.AbsProviderConfig + target GraphNodeProvider +} + +var ( + _ GraphNodeProvider = (*graphNodeProxyProvider)(nil) +) + +func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig { + return n.addr } -func (n *graphNodeProxyProvider) ProviderName() string { - return n.Target().ProviderName() +func (n *graphNodeProxyProvider) Path() addrs.ModuleInstance { + return n.addr.Module } func (n *graphNodeProxyProvider) Name() string { - return ResolveProviderName(n.nameValue, n.path) + return n.addr.String() + " (proxy)" } // find the concrete provider instance @@ -415,26 +509,21 @@ type ProviderConfigTransformer struct { // record providers that can be overriden with a proxy proxiable map[string]bool - // Module is the module to add resources from. - Module *module.Tree + // Config is the root node of the configuration tree to add providers from. + Config *configs.Config } func (t *ProviderConfigTransformer) Transform(g *Graph) error { - // If no module is given, we don't do anything - if t.Module == nil { + // If no configuration is given, we don't do anything + if t.Config == nil { return nil } - // If the module isn't loaded, that is simply an error - if !t.Module.Loaded() { - return errors.New("module must be loaded for ProviderConfigTransformer") - } - t.providers = make(map[string]GraphNodeProvider) t.proxiable = make(map[string]bool) // Start the transformation process - if err := t.transform(g, t.Module); err != nil { + if err := t.transform(g, t.Config); err != nil { return err } @@ -442,95 +531,126 @@ func (t *ProviderConfigTransformer) Transform(g *Graph) error { return t.attachProviderConfigs(g) } -func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error { +func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error { // If no config, do nothing - if m == nil { + if c == nil { return nil } // Add our resources - if err := t.transformSingle(g, m); err != nil { + if err := t.transformSingle(g, c); err != nil { return err } // Transform all the children. - for _, c := range m.Children() { - if err := t.transform(g, c); err != nil { + for _, cc := range c.Children { + if err := t.transform(g, cc); err != nil { return err } } return nil } -func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { - log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path()) - - // Get the configuration for this module - conf := m.Config() - - // Build the path we're at - path := m.Path() - if len(path) > 0 { - path = append([]string{RootModuleName}, path...) 
+func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error { + // Get the module associated with this configuration tree node + mod := c.Module + staticPath := c.Path + + // We actually need a dynamic module path here, but we've not yet updated + // our graph builders enough to support expansion of module calls with + // "count" and "for_each" set, so for now we'll shim this by converting to + // a dynamic path with no keys. At the time of writing this is the only + // possible kind of dynamic path anyway. + path := make(addrs.ModuleInstance, len(staticPath)) + for i, name := range staticPath { + path[i] = addrs.ModuleInstanceStep{ + Name: name, + } } // add all providers from the configuration - for _, p := range conf.ProviderConfigs { - name := p.Name - if p.Alias != "" { - name += "." + p.Alias - } + for _, p := range mod.ProviderConfigs { + relAddr := p.Addr() + addr := relAddr.Absolute(path) - v := t.Concrete(&NodeAbstractProvider{ - NameValue: name, - PathValue: path, - }) + abstract := &NodeAbstractProvider{ + Addr: addr, + } + var v dag.Vertex + if t.Concrete != nil { + v = t.Concrete(abstract) + } else { + v = abstract + } // Add it to the graph g.Add(v) - fullName := ResolveProviderName(name, path) - t.providers[fullName] = v.(GraphNodeProvider) - t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0 + key := addr.String() + t.providers[key] = v.(GraphNodeProvider) + + // A provider configuration is "proxyable" if its configuration is + // entirely empty. This means it's standing in for a provider + // configuration that must be passed in from the parent module. + // We decide this by evaluating the config with an empty schema; + // if this succeeds, then we know there's nothing in the body. + _, diags := p.Config.Content(&hcl.BodySchema{}) + t.proxiable[key] = !diags.HasErrors() } // Now replace the provider nodes with proxy nodes if a provider was being // passed in, and create implicit proxies if there was no config. Any extra // proxies will be removed in the prune step. - return t.addProxyProviders(g, m) + return t.addProxyProviders(g, c) } -func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error { - path := m.Path() +func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error { + path := c.Path // can't add proxies at the root if len(path) == 0 { return nil } - parentPath := path[:len(path)-1] - parent := t.Module.Child(parentPath) + parentPath, callAddr := path.Call() + parent := c.Parent if parent == nil { return nil } - var parentCfg *config.Module - for _, mod := range parent.Config().Modules { - if mod.Name == m.Name() { + callName := callAddr.Name + var parentCfg *configs.ModuleCall + for name, mod := range parent.Module.ModuleCalls { + if name == callName { parentCfg = mod break } } + // We currently don't support count/for_each for modules and so we must + // shim our path and parentPath into module instances here so that the + // rest of Terraform can behave as if we do. This shimming should be + // removed later as part of implementing count/for_each for modules. + instPath := make(addrs.ModuleInstance, len(path)) + for i, name := range path { + instPath[i] = addrs.ModuleInstanceStep{Name: name} + } + parentInstPath := make(addrs.ModuleInstance, len(parentPath)) + for i, name := range parentPath { + parentInstPath[i] = addrs.ModuleInstanceStep{Name: name} + } + if parentCfg == nil { // this can't really happen during normal execution. 
- return fmt.Errorf("parent module config not found for %s", m.Name()) + return fmt.Errorf("parent module config not found for %s", c.Path.String()) } // Go through all the providers the parent is passing in, and add proxies to // the parent provider nodes. - for name, parentName := range parentCfg.Providers { - fullName := ResolveProviderName(name, path) - fullParentName := ResolveProviderName(parentName, parentPath) + for _, pair := range parentCfg.Providers { + fullAddr := pair.InChild.Addr().Absolute(instPath) + fullParentAddr := pair.InParent.Addr().Absolute(parentInstPath) + fullName := fullAddr.String() + fullParentName := fullParentAddr.String() parentProvider := t.providers[fullParentName] @@ -539,9 +659,8 @@ func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) } proxy := &graphNodeProxyProvider{ - nameValue: name, - path: path, - target: parentProvider, + addr: fullAddr, + target: parentProvider, } concreteProvider := t.providers[fullName] @@ -553,8 +672,8 @@ func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) continue } - // aliased providers can't be implicitly passed in - if strings.Contains(name, ".") { + // aliased configurations can't be implicitly passed in + if fullAddr.ProviderConfig.Alias != "" { continue } @@ -575,27 +694,19 @@ func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { } // Determine what we're looking for - path := normalizeModulePath(apn.Path())[1:] - name := apn.ProviderName() - log.Printf("[TRACE] Attach provider request: %#v %s", path, name) + addr := apn.ProviderAddr() // Get the configuration. - tree := t.Module.Child(path) - if tree == nil { + mc := t.Config.DescendentForInstance(addr.Module) + if mc == nil { + log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String()) continue } // Go through the provider configs to find the matching config - for _, p := range tree.Config().ProviderConfigs { - // Build the name, which is "name.alias" if an alias exists - current := p.Name - if p.Alias != "" { - current += "." + p.Alias - } - - // If the configs match then attach! - if current == name { - log.Printf("[TRACE] Attaching provider config: %#v", p) + for _, p := range mc.Module.ProviderConfigs { + if p.Name == addr.ProviderConfig.Type && p.Alias == addr.ProviderConfig.Alias { + log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange) apn.AttachProvider(p) break } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go index f49d824107..fe4cf0e9e8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go @@ -2,6 +2,9 @@ package terraform import ( "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/dag" @@ -22,8 +25,8 @@ type GraphNodeCloseProvisioner interface { } // GraphNodeProvisionerConsumer is an interface that nodes that require -// a provisioner must implement. ProvisionedBy must return the name of the -// provisioner to use. +// a provisioner must implement. ProvisionedBy must return the names of the +// provisioners to use. 
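Provisioners are keyed per module instance: provisionerMapKey, changed later in this hunk, prefixes the provisioner name with the node's module path. A small sketch of that keying, with invented paths and names:

    package main

    import (
        "fmt"
        "strings"
    )

    // provisionerMapKey mimics the per-module keying: the root module uses the
    // bare name, while child modules get a path prefix.
    func provisionerMapKey(name string, modulePath []string) string {
        if len(modulePath) == 0 {
            return name
        }
        return strings.Join(modulePath, ".") + "." + name
    }

    func main() {
        fmt.Println(provisionerMapKey("local-exec", nil))                      // local-exec
        fmt.Println(provisionerMapKey("local-exec", []string{"module.child"})) // module.child.local-exec
    }
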
type GraphNodeProvisionerConsumer interface { ProvisionedBy() []string } @@ -48,6 +51,7 @@ func (t *ProvisionerTransformer) Transform(g *Graph) error { continue } + log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), key, dag.VertexName(m[key])) g.Connect(dag.BasicEdge(v, m[key])) } } @@ -83,12 +87,9 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error { // If this node has a subpath, then we use that as a prefix // into our map to check for an existing provider. - var path []string + path := addrs.RootModuleInstance if sp, ok := pv.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - path = raw - } + path = sp.Path() } for _, p := range pv.ProvisionedBy() { @@ -101,7 +102,7 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error { } if _, ok := supported[p]; !ok { - // If we don't support the provisioner type, skip it. + // If we don't support the provisioner type, we skip it. // Validation later will catch this as an error. continue } @@ -114,6 +115,7 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error { // Add the missing provisioner node to the graph m[key] = g.Add(newV) + log.Printf("[TRACE] MissingProviderTransformer: added implicit provisioner %s, first implied by %s", key, dag.VertexName(v)) } } @@ -156,10 +158,7 @@ func (t *CloseProvisionerTransformer) Transform(g *Graph) error { func provisionerMapKey(k string, v dag.Vertex) string { pathPrefix := "" if sp, ok := v.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - pathPrefix = modulePrefixStr(raw) + "." - } + pathPrefix = sp.Path().String() + "." } return pathPrefix + k diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go index be8c7f96c3..23bc8cd208 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go @@ -3,8 +3,12 @@ package terraform import ( "fmt" "log" - "strings" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/dag" ) @@ -17,35 +21,46 @@ import ( // be referenced and other methods of referencing may still be possible (such // as by path!) type GraphNodeReferenceable interface { - // ReferenceableName is the name by which this can be referenced. - // This can be either just the type, or include the field. Example: - // "aws_instance.bar" or "aws_instance.bar.id". - ReferenceableName() []string + GraphNodeSubPath + + // ReferenceableAddrs returns a list of addresses through which this can be + // referenced. + ReferenceableAddrs() []addrs.Referenceable } // GraphNodeReferencer must be implemented by nodes that reference other // Terraform items and therefore depend on them. type GraphNodeReferencer interface { - // References are the list of things that this node references. This - // can include fields or just the type, just like GraphNodeReferenceable - // above. - References() []string + GraphNodeSubPath + + // References returns a list of references made by this node, which + // include both a referenced address and source location information for + // the reference. 
+	References() []*addrs.Reference
 }
 
-// GraphNodeReferenceGlobal is an interface that can optionally be
-// implemented. If ReferenceGlobal returns true, then the References()
-// and ReferenceableName() must be _fully qualified_ with "module.foo.bar"
-// etc.
+// GraphNodeReferenceOutside is an interface that can optionally be implemented.
+// A node that implements it can specify that its own referenceable addresses
+// and/or the addresses it references are in a different module than the
+// node itself.
+//
+// Any referenceable addresses returned by ReferenceableAddrs are interpreted
+// relative to the returned selfPath.
 //
-// This allows a node to reference and be referenced by a specific name
-// that may cross module boundaries. This can be very dangerous so use
-// this wisely.
+// Any references returned by References are interpreted relative to the
+// returned referencePath.
 //
-// The primary use case for this is module boundaries (variables coming in).
-type GraphNodeReferenceGlobal interface {
-	// Set to true to signal that references and name are fully
-	// qualified. See the above docs for more information.
-	ReferenceGlobal() bool
+// It is valid but not required for either of these paths to match what is
+// returned by method Path, though if both match the main Path then there
+// is no reason to implement this method.
+//
+// The primary use-case for this is the nodes representing module input
+// variables, since their expressions are resolved in terms of their calling
+// module, but they are still referenced from their own module.
+type GraphNodeReferenceOutside interface {
+	// ReferenceOutside returns a path in which any references from this node
+	// are resolved.
+	ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
 }
 
 // ReferenceTransformer is a GraphTransformer that connects all the
@@ -158,75 +173,91 @@ func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
 // ReferenceMap is a structure that can be used to efficiently check
 // for references on a graph.
 type ReferenceMap struct {
-	// m is the mapping of referenceable name to list of verticies that
-	// implement that name. This is built on initialization.
-	references   map[string][]dag.Vertex
-	referencedBy map[string][]dag.Vertex
+	// vertices is a map from internal reference keys (as produced by the
+	// mapKey method) to one or more vertices that are identified by each key.
+	//
+	// A particular reference key might actually identify multiple vertices,
+	// e.g. in situations where one object is contained inside another.
+	vertices map[string][]dag.Vertex
+
+	// edges is a map whose keys are a subset of the internal reference keys
+	// from "vertices", and whose values are the nodes that refer to each
+	// key. The values in this map are the referrers, while values in
+	// "vertices" are the referents. The keys in both cases are referents.
+	edges map[string][]dag.Vertex
 }
 
-// References returns the list of vertices that this vertex
-// references along with any missing references.
-func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
+// References returns the set of vertices that the given vertex refers to,
+// and any referenced addresses that do not have corresponding vertices.
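The instance-to-resource fallback implemented in the function that follows can be modeled with strings standing in for addrs.Referenceable values; the keying and addresses below are purely illustrative.

    package main

    import (
        "fmt"
        "strings"
    )

    // containingResource strips an instance key, e.g. "aws_instance.a[0]"
    // becomes "aws_instance.a".
    func containingResource(addr string) string {
        if i := strings.IndexByte(addr, '['); i >= 0 {
            return addr[:i]
        }
        return addr
    }

    func main() {
        // Vertices keyed by "<module path>|<referenceable address>".
        vertices := map[string][]string{
            "root|aws_instance.a": {"whole-resource node"},
        }

        key := "root|aws_instance.a[0]"
        if _, ok := vertices[key]; !ok {
            // Resource-oriented graph: fall back to the containing resource.
            key = "root|" + containingResource("aws_instance.a[0]")
        }
        fmt.Println(vertices[key]) // [whole-resource node]
    }
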
+func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) { rn, ok := v.(GraphNodeReferencer) if !ok { return nil, nil } + if _, ok := v.(GraphNodeSubPath); !ok { + return nil, nil + } var matches []dag.Vertex - var missing []string - prefix := m.prefix(v) - - for _, ns := range rn.References() { - found := false - for _, n := range strings.Split(ns, "/") { - n = prefix + n - parents, ok := m.references[n] - if !ok { - continue + var missing []addrs.Referenceable + + for _, ref := range rn.References() { + subject := ref.Subject + + key := m.referenceMapKey(v, subject) + if _, exists := m.vertices[key]; !exists { + // If what we were looking for was a ResourceInstance then we + // might be in a resource-oriented graph rather than an + // instance-oriented graph, and so we'll see if we have the + // resource itself instead. + switch ri := subject.(type) { + case addrs.ResourceInstance: + subject = ri.ContainingResource() + case addrs.ResourceInstancePhase: + subject = ri.ContainingResource() } + key = m.referenceMapKey(v, subject) + } - // Mark that we found a match - found = true - - for _, p := range parents { - // don't include self-references - if p == v { - continue - } - matches = append(matches, p) + vertices := m.vertices[key] + for _, rv := range vertices { + // don't include self-references + if rv == v { + continue } - - break + matches = append(matches, rv) } - - if !found { - missing = append(missing, ns) + if len(vertices) == 0 { + missing = append(missing, ref.Subject) } } return matches, missing } -// ReferencedBy returns the list of vertices that reference the -// vertex passed in. -func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex { +// Referrers returns the set of vertices that refer to the given vertex. +func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex { rn, ok := v.(GraphNodeReferenceable) if !ok { return nil } + sp, ok := v.(GraphNodeSubPath) + if !ok { + return nil + } var matches []dag.Vertex - prefix := m.prefix(v) - for _, n := range rn.ReferenceableName() { - n = prefix + n - children, ok := m.referencedBy[n] + for _, addr := range rn.ReferenceableAddrs() { + key := m.mapKey(sp.Path(), addr) + referrers, ok := m.edges[key] if !ok { continue } - // Make sure this isn't a self reference, which isn't included + // If the referrer set includes our own given vertex then we skip, + // since we don't want to return self-references. selfRef := false - for _, p := range children { + for _, p := range referrers { if p == v { selfRef = true break @@ -236,28 +267,77 @@ func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex { continue } - matches = append(matches, children...) + matches = append(matches, referrers...) } return matches } -func (m *ReferenceMap) prefix(v dag.Vertex) string { - // If the node is stating it is already fully qualified then - // we don't have to create the prefix! - if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() { - return "" +func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string { + return fmt.Sprintf("%s|%s", path.String(), addr.String()) +} + +// vertexReferenceablePath returns the path in which the given vertex can be +// referenced. This is the path that its results from ReferenceableAddrs +// are considered to be relative to. +// +// Only GraphNodeSubPath implementations can be referenced, so this method will +// panic if the given vertex does not implement that interface. 
+func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance { + sp, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp)) } - // Create the prefix based on the path - var prefix string - if pn, ok := v.(GraphNodeSubPath); ok { - if path := normalizeModulePath(pn.Path()); len(path) > 1 { - prefix = modulePrefixStr(path) + "." - } + if outside, ok := v.(GraphNodeReferenceOutside); ok { + // Vertex is referenced from a different module than where it was + // declared. + path, _ := outside.ReferenceOutside() + return path + } + + // Vertex is referenced from the same module as where it was declared. + return sp.Path() +} + +// vertexReferencePath returns the path in which references _from_ the given +// vertex must be interpreted. +// +// Only GraphNodeSubPath implementations can have references, so this method +// will panic if the given vertex does not implement that interface. +func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance { + sp, ok := referrer.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp)) + } + + var path addrs.ModuleInstance + if outside, ok := referrer.(GraphNodeReferenceOutside); ok { + // Vertex makes references to objects in a different module than where + // it was declared. + _, path = outside.ReferenceOutside() + return path } - return prefix + // Vertex makes references to objects in the same module as where it + // was declared. + return sp.Path() +} + +// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex +// that the reference is from, and "addr" is the address of the object being +// referenced. +// +// The result is an opaque string that includes both the address of the given +// object and the address of the module instance that object belongs to. +// +// Only GraphNodeSubPath implementations can be referrers, so this method will +// panic if the given vertex does not implement that interface. +func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string { + path := vertexReferencePath(referrer) + return m.mapKey(path, addr) } // NewReferenceMap is used to create a new reference map for the @@ -266,83 +346,82 @@ func NewReferenceMap(vs []dag.Vertex) *ReferenceMap { var m ReferenceMap // Build the lookup table - refMap := make(map[string][]dag.Vertex) + vertices := make(map[string][]dag.Vertex) for _, v := range vs { + _, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + continue + } + // We're only looking for referenceable nodes rn, ok := v.(GraphNodeReferenceable) if !ok { continue } + path := m.vertexReferenceablePath(v) + // Go through and cache them - prefix := m.prefix(v) - for _, n := range rn.ReferenceableName() { - n = prefix + n - refMap[n] = append(refMap[n], v) + for _, addr := range rn.ReferenceableAddrs() { + key := m.mapKey(path, addr) + vertices[key] = append(vertices[key], v) } - // If there is a path, it is always referenceable by that. 
For - // example, if this is a referenceable thing at path []string{"foo"}, - // then it can be referenced at "module.foo" - if pn, ok := v.(GraphNodeSubPath); ok { - for _, p := range ReferenceModulePath(pn.Path()) { - refMap[p] = append(refMap[p], v) - } + // Any node can be referenced by the address of the module it belongs + // to or any of that module's ancestors. + for _, addr := range path.Ancestors()[1:] { + // Can be referenced either as the specific call instance (with + // an instance key) or as the bare module call itself (the "module" + // block in the parent module that created the instance). + callPath, call := addr.Call() + callInstPath, callInst := addr.CallInstance() + callKey := m.mapKey(callPath, call) + callInstKey := m.mapKey(callInstPath, callInst) + vertices[callKey] = append(vertices[callKey], v) + vertices[callInstKey] = append(vertices[callInstKey], v) } } // Build the lookup table for referenced by - refByMap := make(map[string][]dag.Vertex) + edges := make(map[string][]dag.Vertex) for _, v := range vs { - // We're only looking for referenceable nodes + _, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + continue + } + rn, ok := v.(GraphNodeReferencer) if !ok { + // We're only looking for referenceable nodes continue } // Go through and cache them - prefix := m.prefix(v) - for _, n := range rn.References() { - n = prefix + n - refByMap[n] = append(refByMap[n], v) + for _, ref := range rn.References() { + if ref.Subject == nil { + // Should never happen + panic(fmt.Sprintf("%T.References returned reference with nil subject", rn)) + } + key := m.referenceMapKey(v, ref.Subject) + edges[key] = append(edges[key], v) } } - m.references = refMap - m.referencedBy = refByMap + m.vertices = vertices + m.edges = edges return &m } -// Returns the reference name for a module path. The path "foo" would return -// "module.foo". If this is a deeply nested module, it will be every parent -// as well. For example: ["foo", "bar"] would return both "module.foo" and -// "module.foo.module.bar" -func ReferenceModulePath(p []string) []string { - p = normalizeModulePath(p) - if len(p) == 1 { - // Root, no name - return nil - } - - result := make([]string, 0, len(p)-1) - for i := len(p); i > 1; i-- { - result = append(result, modulePrefixStr(p[:i])) - } - - return result -} - // ReferencesFromConfig returns the references that a configuration has // based on the interpolated variables in a configuration. -func ReferencesFromConfig(c *config.RawConfig) []string { - var result []string - for _, v := range c.Variables { - if r := ReferenceFromInterpolatedVar(v); len(r) > 0 { - result = append(result, r...) - } +func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference { + if body == nil { + return nil } - - return result + refs, _ := lang.ReferencesInBlock(body, schema) + return refs } // ReferenceFromInterpolatedVar returns the reference from this variable, @@ -378,18 +457,31 @@ func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string { } } -func modulePrefixStr(p []string) string { - // strip "root" - if len(p) > 0 && p[0] == rootModulePath[0] { - p = p[1:] - } - - parts := make([]string, 0, len(p)*2) - for _, p := range p { - parts = append(parts, "module", p) +// appendResourceDestroyReferences identifies resource and resource instance +// references in the given slice and appends to it the "destroy-phase" +// equivalents of those references, returning the result. 
+// +// This can be used in the References implementation for a node which must also +// depend on the destruction of anything it references. +func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference { + given := refs + for _, ref := range given { + switch tr := ref.Subject.(type) { + case addrs.Resource: + newRef := *ref // shallow copy + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + refs = append(refs, &newRef) + case addrs.ResourceInstance: + newRef := *ref // shallow copy + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + refs = append(refs, &newRef) + } } + return refs +} - return strings.Join(parts, ".") +func modulePrefixStr(p addrs.ModuleInstance) string { + return p.String() } func modulePrefixList(result []string, prefix string) []string { diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go index 2e05edbaaa..ee71387e24 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go @@ -3,14 +3,15 @@ package terraform import ( "log" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" ) -// RemoveModuleTransformer implements GraphTransformer to add nodes indicating +// RemovedModuleTransformer implements GraphTransformer to add nodes indicating // when a module was removed from the configuration. type RemovedModuleTransformer struct { - Module *module.Tree // root module - State *State + Config *configs.Config // root node in the config tree + State *states.State } func (t *RemovedModuleTransformer) Transform(g *Graph) error { @@ -20,13 +21,13 @@ func (t *RemovedModuleTransformer) Transform(g *Graph) error { } for _, m := range t.State.Modules { - c := t.Module.Child(m.Path[1:]) - if c != nil { + cc := t.Config.DescendentForInstance(m.Addr) + if cc != nil { continue } - log.Printf("[DEBUG] module %s no longer in config\n", modulePrefixStr(m.Path)) - g.Add(&NodeModuleRemoved{PathValue: m.Path}) + log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr) + g.Add(&NodeModuleRemoved{Addr: m.Addr}) } return nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go index e528b37b46..11237909f3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go @@ -1,8 +1,8 @@ package terraform import ( - "fmt" - + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/dag" ) @@ -11,41 +11,44 @@ import ( // // This assumes that the count is already interpolated. type ResourceCountTransformer struct { - Concrete ConcreteResourceNodeFunc + Concrete ConcreteResourceInstanceNodeFunc + Schema *configschema.Block + // Count is either the number of indexed instances to create, or -1 to + // indicate that count is not set at all and thus a no-key instance should + // be created. 
Count int - Addr *ResourceAddress + Addr addrs.AbsResource } func (t *ResourceCountTransformer) Transform(g *Graph) error { - // Don't allow the count to be negative if t.Count < 0 { - return fmt.Errorf("negative count: %d", t.Count) + // Negative count indicates that count is not set at all. + addr := t.Addr.Instance(addrs.NoKey) + + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + return nil } // For each count, build and add the node for i := 0; i < t.Count; i++ { - // Set the index. If our count is 1 we special case it so that - // we handle the "resource.0" and "resource" boundary properly. - index := i - if t.Count == 1 { - index = -1 - } + key := addrs.IntKey(i) + addr := t.Addr.Instance(key) - // Build the resource address - addr := t.Addr.Copy() - addr.Index = index - - // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{ - Addr: addr, - } + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema var node dag.Vertex = abstract if f := t.Concrete; f != nil { node = f(abstract) } - // Add it to the graph g.Add(node) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go index 471cd74657..0b52347df3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go @@ -1,10 +1,9 @@ package terraform import ( - "fmt" "log" - "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" ) // StateTransformer is a GraphTransformer that adds the elements of @@ -13,53 +12,63 @@ import ( // This transform is used for example by the DestroyPlanGraphBuilder to ensure // that only resources that are in the state are represented in the graph. type StateTransformer struct { - Concrete ConcreteResourceNodeFunc + // ConcreteCurrent and ConcreteDeposed are used to specialize the abstract + // resource instance nodes that this transformer will create. + // + // If either of these is nil, the objects of that type will be skipped and + // not added to the graph at all. It doesn't make sense to use this + // transformer without setting at least one of these, since that would + // skip everything and thus be a no-op. + ConcreteCurrent ConcreteResourceInstanceNodeFunc + ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc - State *State + State *states.State } func (t *StateTransformer) Transform(g *Graph) error { - // If the state is nil or empty (nil is empty) then do nothing - if t.State.Empty() { + if !t.State.HasResources() { + log.Printf("[TRACE] StateTransformer: state is empty, so nothing to do") return nil } - // Go through all the modules in the diff. 
- log.Printf("[TRACE] StateTransformer: starting") - var nodes []dag.Vertex + switch { + case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects") + case t.ConcreteCurrent != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only") + case t.ConcreteDeposed != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only") + default: + log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all") + } + for _, ms := range t.State.Modules { - log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path) + moduleAddr := ms.Addr - // Go through all the resources in this module. - for name, rs := range ms.Resources { - log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs) + for _, rs := range ms.Resources { + resourceAddr := rs.Addr.Absolute(moduleAddr) - // Add the resource to the graph - addr, err := parseResourceAddressInternal(name) - if err != nil { - panic(fmt.Sprintf( - "Error parsing internal name, this is a bug: %q", name)) - } + for key, is := range rs.Instances { + addr := resourceAddr.Instance(key) - // Very important: add the module path for this resource to - // the address. Remove "root" from it. - addr.Path = ms.Path[1:] + if obj := is.Current; obj != nil && t.ConcreteCurrent != nil { + abstract := NewNodeAbstractResourceInstance(addr) + node := t.ConcreteCurrent(abstract) + g.Add(node) + log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr) + } - // Add the resource to the graph - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) + if t.ConcreteDeposed != nil { + for dk := range is.Deposed { + abstract := NewNodeAbstractResourceInstance(addr) + node := t.ConcreteDeposed(abstract, dk) + g.Add(node) + log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk) + } + } } - - nodes = append(nodes, node) } } - // Add all the nodes to the graph - for _, n := range nodes { - g.Add(n) - } - return nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go index af6defe366..d25274e68b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go @@ -3,6 +3,7 @@ package terraform import ( "log" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/dag" ) @@ -12,7 +13,7 @@ import ( // provided will contain every target provided, and each implementing graph // node must filter this list to targets considered relevant. type GraphNodeTargetable interface { - SetTargets([]ResourceAddress) + SetTargets([]addrs.Targetable) } // GraphNodeTargetDownstream is an interface for graph nodes that need to @@ -35,11 +36,7 @@ type GraphNodeTargetDownstream interface { // their dependencies. type TargetsTransformer struct { // List of targeted resource names specified by the user - Targets []string - - // List of parsed targets, provided by callers like ResourceCountTransform - // that already have the targets parsed - ParsedTargets []ResourceAddress + Targets []addrs.Targetable // If set, the index portions of resource addresses will be ignored // for comparison. 
This is used when transforming a graph where
@@ -53,17 +50,8 @@ type TargetsTransformer struct {
 }
 
 func (t *TargetsTransformer) Transform(g *Graph) error {
-	if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
-		addrs, err := t.parseTargetAddresses()
-		if err != nil {
-			return err
-		}
-
-		t.ParsedTargets = addrs
-	}
-
-	if len(t.ParsedTargets) > 0 {
-		targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
+	if len(t.Targets) > 0 {
+		targetedNodes, err := t.selectTargetedNodes(g, t.Targets)
 		if err != nil {
 			return err
 		}
@@ -88,24 +76,10 @@ func (t *TargetsTransformer) Transform(g *Graph) error {
 	return nil
 }
 
-func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
-	addrs := make([]ResourceAddress, len(t.Targets))
-	for i, target := range t.Targets {
-		ta, err := ParseResourceAddress(target)
-		if err != nil {
-			return nil, err
-		}
-		addrs[i] = *ta
-	}
-
-	return addrs, nil
-}
-
-// Returns the list of targeted nodes. A targeted node is either addressed
-// directly, or is an Ancestor of a targeted node. Destroy mode keeps
-// Descendents instead of Ancestors.
-func (t *TargetsTransformer) selectTargetedNodes(
-	g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
+// Returns a set of targeted nodes. A targeted node is either addressed
+// directly, addressed indirectly via its container, or it's a dependency of a
+// targeted node. Destroy mode keeps dependents instead of dependencies.
+func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (*dag.Set, error) {
 	targetedNodes := new(dag.Set)
 
 	vertices := g.Vertices()
@@ -154,6 +128,12 @@ func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (
 		vertices := queue
 		queue = nil // ready to append for next iteration if necessary
 		for _, v := range vertices {
+			// providers don't cause transitive dependencies, so don't target
+			// downstream from them.
+			if _, ok := v.(GraphNodeProvider); ok {
+				continue
+			}
+
 			dependers := g.UpEdges(v)
 			if dependers == nil {
 				// indicates that there are no up edges for this node, so
@@ -240,21 +220,34 @@ func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool
 	return true
 }
 
-func (t *TargetsTransformer) nodeIsTarget(
-	v dag.Vertex, addrs []ResourceAddress) bool {
-	r, ok := v.(GraphNodeResource)
+func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool {
+	var vertexAddr addrs.Targetable
+	switch r := v.(type) {
+	case GraphNodeResourceInstance:
+		vertexAddr = r.ResourceInstanceAddr()
+	case GraphNodeResource:
+		vertexAddr = r.ResourceAddr()
+	default:
+		// Only resource and resource instance nodes can be targeted.
+		return false
+	}
+	_, ok := v.(GraphNodeResource)
 	if !ok {
 		return false
 	}
 
-	addr := r.ResourceAddr()
-	for _, targetAddr := range addrs {
+	for _, targetAddr := range targets {
 		if t.IgnoreIndices {
-			// targetAddr is not a pointer, so we can safely mutate it without
-			// interfering with references elsewhere.
-			targetAddr.Index = -1
+			// If we're ignoring indices then we'll convert any resource instance
+			// addresses into resource addresses. We don't need to convert
+			// vertexAddr because instance addresses are contained within
+			// their associated resources, and so .TargetContains will take
+			// care of this for us.
+ if instance, isInstance := targetAddr.(addrs.AbsResourceInstance); isInstance { + targetAddr = instance.ContainingResource() + } } - if targetAddr.Contains(addr) { + if targetAddr.TargetContains(vertexAddr) { return true } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go index b31e2c765f..05daa5135a 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go @@ -1,7 +1,8 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" ) // RootVariableTransformer is a GraphTransformer that adds all the root @@ -11,28 +12,27 @@ import ( // graph since downstream things that depend on them must be able to // reach them. type RootVariableTransformer struct { - Module *module.Tree + Config *configs.Config } func (t *RootVariableTransformer) Transform(g *Graph) error { - // If no config, no variables - if t.Module == nil { + // We can have no variables if we have no config. + if t.Config == nil { return nil } - // If we have no vars, we're done! - vars := t.Module.Config().Variables - if len(vars) == 0 { - return nil - } + // We're only considering root module variables here, since child + // module variables are handled by ModuleVariableTransformer. + vars := t.Config.Module.Variables // Add all variables here for _, v := range vars { node := &NodeRootVariable{ + Addr: addrs.InputVariable{ + Name: v.Name, + }, Config: v, } - - // Add it! g.Add(node) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go index 7c87459220..f6790d9e5f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go @@ -1,10 +1,12 @@ package terraform +import "context" + // UIInput is the interface that must be implemented to ask for input // from this user. This should forward the request to wherever the user // inputs things to ask for values. type UIInput interface { - Input(*InputOpts) (string, error) + Input(context.Context, *InputOpts) (string, error) } // InputOpts are options for asking for input. diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go index e3a07efa33..e2d9c38481 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go @@ -1,5 +1,7 @@ package terraform +import "context" + // MockUIInput is an implementation of UIInput that can be used for tests. 
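
Passing a context.Context through Input is what makes a pending prompt abandonable, e.g. when the operation is interrupted. A minimal sketch of an implementation honoring cancellation; the inputOpts and uiInput types here are local stand-ins for the real InputOpts and UIInput:

package main

import (
	"bufio"
	"context"
	"fmt"
	"os"
)

type inputOpts struct {
	Id    string
	Query string
}

type uiInput interface {
	Input(ctx context.Context, opts *inputOpts) (string, error)
}

// stdinInput reads one line from stdin, abandoning the wait as soon as
// the context is cancelled. (The reading goroutine itself may linger
// until the process exits; buffered channels keep it from blocking.)
type stdinInput struct{}

func (stdinInput) Input(ctx context.Context, opts *inputOpts) (string, error) {
	lines := make(chan string, 1)
	errs := make(chan error, 1)
	go func() {
		line, err := bufio.NewReader(os.Stdin).ReadString('\n')
		if err != nil {
			errs <- err
			return
		}
		lines <- line
	}()

	select {
	case <-ctx.Done():
		return "", ctx.Err()
	case err := <-errs:
		return "", err
	case line := <-lines:
		return line, nil
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var in uiInput = stdinInput{}
	fmt.Print("value for var.region? ")
	v, err := in.Input(ctx, &inputOpts{Id: "var.region", Query: "region"})
	fmt.Println(v, err)
}
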
type MockUIInput struct { InputCalled bool @@ -10,7 +12,7 @@ type MockUIInput struct { InputFn func(*InputOpts) (string, error) } -func (i *MockUIInput) Input(opts *InputOpts) (string, error) { +func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { i.InputCalled = true i.InputOpts = opts if i.InputFn != nil { diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go index 2207d1d0fd..b5d32b1e85 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go @@ -1,6 +1,7 @@ package terraform import ( + "context" "fmt" ) @@ -12,8 +13,8 @@ type PrefixUIInput struct { UIInput UIInput } -func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) { +func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) - return i.UIInput.Input(opts) + return i.UIInput.Input(ctx, opts) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go index 878a03122f..fff964f4bd 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go @@ -1,15 +1,19 @@ package terraform +import ( + "github.com/hashicorp/terraform/addrs" +) + // ProvisionerUIOutput is an implementation of UIOutput that calls a hook // for the output so that the hooks can handle it. type ProvisionerUIOutput struct { - Info *InstanceInfo - Type string - Hooks []Hook + InstanceAddr addrs.AbsResourceInstance + ProvisionerType string + Hooks []Hook } func (o *ProvisionerUIOutput) Output(msg string) { for _, h := range o.Hooks { - h.ProvisionOutput(o.Info, o.Type, msg) + h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg) } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go new file mode 100644 index 0000000000..1380c851f9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go @@ -0,0 +1,44 @@ +// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT. 
+ +package terraform + +import "strconv" + +const ( + _ValueSourceType_name_0 = "ValueFromUnknown" + _ValueSourceType_name_1 = "ValueFromCLIArg" + _ValueSourceType_name_2 = "ValueFromConfig" + _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile" + _ValueSourceType_name_4 = "ValueFromInput" + _ValueSourceType_name_5 = "ValueFromNamedFile" + _ValueSourceType_name_6 = "ValueFromPlan" + _ValueSourceType_name_7 = "ValueFromCaller" +) + +var ( + _ValueSourceType_index_3 = [...]uint8{0, 15, 32} +) + +func (i ValueSourceType) String() string { + switch { + case i == 0: + return _ValueSourceType_name_0 + case i == 65: + return _ValueSourceType_name_1 + case i == 67: + return _ValueSourceType_name_2 + case 69 <= i && i <= 70: + i -= 69 + return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]] + case i == 73: + return _ValueSourceType_name_4 + case i == 78: + return _ValueSourceType_name_5 + case i == 80: + return _ValueSourceType_name_6 + case i == 83: + return _ValueSourceType_name_7 + default: + return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go index 300f2adb1a..75531b2bf3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/variables.go +++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go @@ -2,165 +2,312 @@ package terraform import ( "fmt" - "os" - "strings" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/helper/hilmapstructure" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/tfdiags" +) + +// InputValue represents a value for a variable in the root module, provided +// as part of the definition of an operation. +type InputValue struct { + Value cty.Value + SourceType ValueSourceType + + // SourceRange provides source location information for values whose + // SourceType is either ValueFromConfig or ValueFromFile. It is not + // populated for other source types, and so should not be used. + SourceRange tfdiags.SourceRange +} + +// ValueSourceType describes what broad category of source location provided +// a particular value. +type ValueSourceType rune + +const ( + // ValueFromUnknown is the zero value of ValueSourceType and is not valid. + ValueFromUnknown ValueSourceType = 0 + + // ValueFromConfig indicates that a value came from a .tf or .tf.json file, + // e.g. the default value defined for a variable. + ValueFromConfig ValueSourceType = 'C' + + // ValueFromAutoFile indicates that a value came from a "values file", like + // a .tfvars file, that was implicitly loaded by naming convention. + ValueFromAutoFile ValueSourceType = 'F' + + // ValueFromNamedFile indicates that a value came from a named "values file", + // like a .tfvars file, that was passed explicitly on the command line (e.g. + // -var-file=foo.tfvars). + ValueFromNamedFile ValueSourceType = 'N' + + // ValueFromCLIArg indicates that the value was provided directly in + // a CLI argument. The name of this argument is not recorded and so it must + // be inferred from context. + ValueFromCLIArg ValueSourceType = 'A' + + // ValueFromEnvVar indicates that the value was provided via an environment + // variable. The name of the variable is not recorded and so it must be + // inferred from context. 
+ ValueFromEnvVar ValueSourceType = 'E' + + // ValueFromInput indicates that the value was provided at an interactive + // input prompt. + ValueFromInput ValueSourceType = 'I' + + // ValueFromPlan indicates that the value was retrieved from a stored plan. + ValueFromPlan ValueSourceType = 'P' + + // ValueFromCaller indicates that the value was explicitly overridden by + // a caller to Context.SetVariable after the context was constructed. + ValueFromCaller ValueSourceType = 'S' ) -// Variables returns the fully loaded set of variables to use with -// ContextOpts and NewContext, loading any additional variables from -// the environment or any other sources. +func (v *InputValue) GoString() string { + if (v.SourceRange != tfdiags.SourceRange{}) { + return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange) + } else { + return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType) + } +} + +func (v ValueSourceType) GoString() string { + return fmt.Sprintf("terraform.%s", v) +} + +//go:generate stringer -type ValueSourceType + +// InputValues is a map of InputValue instances. +type InputValues map[string]*InputValue + +// InputValuesFromCaller turns the given map of naked values into an +// InputValues that attributes each value to "a caller", using the source +// type ValueFromCaller. This is primarily useful for testing purposes. // -// The given module tree doesn't need to be loaded. -func Variables( - m *module.Tree, - override map[string]interface{}) (map[string]interface{}, error) { - result := make(map[string]interface{}) - - // Variables are loaded in the following sequence. Each additional step - // will override conflicting variable keys from prior steps: - // - // * Take default values from config - // * Take values from TF_VAR_x env vars - // * Take values specified in the "override" param which is usually - // from -var, -var-file, etc. - // - - // First load from the config - for _, v := range m.Config().Variables { - // If the var has no default, ignore - if v.Default == nil { - continue +// This should not be used as a general way to convert map[string]cty.Value +// into InputValues, since in most real cases we want to set a suitable +// other SourceType and possibly SourceRange value. +func InputValuesFromCaller(vals map[string]cty.Value) InputValues { + ret := make(InputValues, len(vals)) + for k, v := range vals { + ret[k] = &InputValue{ + Value: v, + SourceType: ValueFromCaller, } + } + return ret +} - // If the type isn't a string, we use it as-is since it is a rich type - if v.Type() != config.VariableTypeString { - result[v.Name] = v.Default - continue +// Override merges the given value maps with the receiver, overriding any +// conflicting keys so that the latest definition wins. +func (vv InputValues) Override(others ...InputValues) InputValues { + // FIXME: This should check to see if any of the values are maps and + // merge them if so, in order to preserve the behavior from prior to + // Terraform 0.12. 
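
The merge discipline of Override (copy the receiver first, then apply each additional map in order so the latest definition wins) is what lets config defaults, tfvars files, and -var flags layer predictably. A dependency-free stand-in making the precedence concrete:

package main

import "fmt"

// inputValue / inputValues are simplified stand-ins for the InputValue
// and InputValues types above, using plain strings instead of cty.Value.
type inputValue struct {
	Value      string
	SourceType rune
}

type inputValues map[string]*inputValue

// override mirrors InputValues.Override: later maps win on conflict.
func (vv inputValues) override(others ...inputValues) inputValues {
	ret := make(inputValues)
	for k, v := range vv {
		ret[k] = v
	}
	for _, other := range others {
		for k, v := range other {
			ret[k] = v
		}
	}
	return ret
}

func main() {
	defaults := inputValues{"region": {Value: "us-east-1", SourceType: 'C'}}
	cliArgs := inputValues{"region": {Value: "eu-west-1", SourceType: 'A'}}

	merged := defaults.override(cliArgs)
	fmt.Println(merged["region"].Value) // eu-west-1: the CLI definition wins
}
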
+	ret := make(InputValues)
+	for k, v := range vv {
+		ret[k] = v
+	}
+	for _, other := range others {
+		for k, v := range other {
+			ret[k] = v
 		}
+	}
+	return ret
+}
 
-	// v.Default has already been parsed as HCL but it may be an int type
-	switch typedDefault := v.Default.(type) {
-	case string:
-		if typedDefault == "" {
-			continue
-		}
-		result[v.Name] = typedDefault
-	case int, int64:
-		result[v.Name] = fmt.Sprintf("%d", typedDefault)
-	case float32, float64:
-		result[v.Name] = fmt.Sprintf("%f", typedDefault)
-	case bool:
-		result[v.Name] = fmt.Sprintf("%t", typedDefault)
-	default:
-		panic(fmt.Sprintf(
-			"Unknown default var type: %T\n\n"+
-				"THIS IS A BUG. Please report it.",
-			v.Default))
-	}
+// JustValues returns a map that just includes the values, discarding the
+// source information.
+func (vv InputValues) JustValues() map[string]cty.Value {
+	ret := make(map[string]cty.Value, len(vv))
+	for k, v := range vv {
+		ret[k] = v.Value
 	}
+	return ret
+}
 
-	// Load from env vars
-	for _, v := range os.Environ() {
-		if !strings.HasPrefix(v, VarEnvPrefix) {
+// DefaultVariableValues returns an InputValues map representing the default
+// values specified for variables in the given configuration map.
+func DefaultVariableValues(configs map[string]*configs.Variable) InputValues {
+	ret := make(InputValues)
+	for k, c := range configs {
+		if c.Default == cty.NilVal {
 			continue
 		}
+		ret[k] = &InputValue{
+			Value:       c.Default,
+			SourceType:  ValueFromConfig,
+			SourceRange: tfdiags.SourceRangeFromHCL(c.DeclRange),
+		}
+	}
+	return ret
+}
 
-		// Strip off the prefix and get the value after the first "="
-		idx := strings.Index(v, "=")
-		k := v[len(VarEnvPrefix):idx]
-		v = v[idx+1:]
-
-		// Override the configuration-default values. Note that *not* finding the variable
-		// in configuration is OK, as we don't want to preclude people from having multiple
-		// sets of TF_VAR_whatever in their environment even if it is a little weird.
-		for _, schema := range m.Config().Variables {
-			if schema.Name != k {
-				continue
-			}
-
-			varType := schema.Type()
-			varVal, err := parseVariableAsHCL(k, v, varType)
-			if err != nil {
-				return nil, err
-			}
+// SameValues returns true if the given InputValues has the same values as
+// the receiver, disregarding the source types and source ranges.
+//
+// Values are compared using the cty "RawEquals" method, which means that
+// unknown values can be considered equal to one another if they are of the
+// same type.
+func (vv InputValues) SameValues(other InputValues) bool {
+	if len(vv) != len(other) {
+		return false
+	}
-				var strVal string
-				if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
-					result[k] = strVal
-				} else {
-					result[k] = v
-				}
-			default:
-				panic(fmt.Sprintf(
-					"Unhandled var type: %T\n\n"+
-						"THIS IS A BUG. Please report it.",
-					schema.Type()))
-			}
+// HasValues returns true if the receiver has the same values as in the given
+// map, disregarding the source types and source ranges.
+//
+// Values are compared using the cty "RawEquals" method, which means that
+// unknown values can be considered equal to one another if they are of the
+// same type.
+func (vv InputValues) HasValues(vals map[string]cty.Value) bool {
+	if len(vv) != len(vals) {
+		return false
+	}
+
+	for k, v := range vv {
+		oVal, exists := vals[k]
+		if !exists {
+			return false
+		}
+		if !v.Value.RawEquals(oVal) {
+			return false
 		}
 	}
 
-	return result, nil
+	return true
 }
 
-// varSetMap sets or merges the map in "v" with the key "k" in the
-// "current" set of variables. This is just a private function to remove
-// duplicate logic in Variables
-func varSetMap(current map[string]interface{}, k string, v interface{}) error {
-	existing, ok := current[k]
-	if !ok {
-		current[k] = v
-		return nil
+// Identical returns true if the given InputValues has the same values,
+// source types, and source ranges as the receiver.
+//
+// Values are compared using the cty "RawEquals" method, which means that
+// unknown values can be considered equal to one another if they are of the
+// same type.
+//
+// This method is primarily for testing. For most practical purposes, it's
+// better to use SameValues or HasValues.
+func (vv InputValues) Identical(other InputValues) bool {
+	if len(vv) != len(other) {
+		return false
 	}
-	existingMap, ok := existing.(map[string]interface{})
-	if !ok {
-		panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k))
+	for k, v := range vv {
+		ov, exists := other[k]
+		if !exists {
+			return false
+		}
+		if !v.Value.RawEquals(ov.Value) {
+			return false
+		}
+		if v.SourceType != ov.SourceType {
+			return false
+		}
+		if v.SourceRange != ov.SourceRange {
+			return false
+		}
 	}
-	switch typedV := v.(type) {
-	case []map[string]interface{}:
-		for newKey, newVal := range typedV[0] {
-			existingMap[newKey] = newVal
+	return true
+}
+
+// checkInputVariables ensures that variable values supplied at the UI conform
+// to their corresponding declarations in configuration.
+//
+// The set of values is considered valid only if the returned diagnostics
+// does not contain errors. A valid set of values may still produce warnings,
+// which should be returned to the user.
+func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+
+	for name, vc := range vcs {
+		val, isSet := vs[name]
+		if !isSet {
+			// Always an error, since the caller should already have included
+			// default values from the configuration in the values map.
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Unassigned variable",
+				fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name),
+			))
+			continue
+		}
+
+		wantType := vc.Type
+
+		// A given value is valid if it can convert to the desired type.
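
The convert.Convert call that follows is the whole of the type check: a value is acceptable for a variable exactly when it can convert to the declared type. It can be exercised directly, assuming the upstream github.com/zclconf/go-cty module:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// A string that looks like a number converts cleanly, which is why
	// -var="instances=5" can satisfy a number-typed variable.
	v, err := convert.Convert(cty.StringVal("5"), cty.Number)
	fmt.Printf("%#v %v\n", v, err) // cty.NumberIntVal(5) <nil>

	// A list can never convert to a string; the error returned here is
	// the text that ends up inside the "Invalid value for input
	// variable" diagnostics below.
	_, err = convert.Convert(cty.ListVal([]cty.Value{cty.True}), cty.String)
	fmt.Println(err)
}
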
+ _, err := convert.Convert(val.Value, wantType) + if err != nil { + switch val.SourceType { + case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile: + // We have source location information for these. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid value for input variable", + Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err), + Subject: val.SourceRange.ToHCL().Ptr(), + }) + case ValueFromEnvVar: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err), + )) + case ValueFromCLIArg: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err), + )) + case ValueFromInput: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err), + )) + default: + // The above gets us good coverage for the situations users + // are likely to encounter with their own inputs. The other + // cases are generally implementation bugs, so we'll just + // use a generic error for these. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err), + )) + } } - default: - return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v)) } - return nil + + // Check for any variables that are assigned without being configured. + // This is always an implementation error in the caller, because we + // expect undefined variables to be caught during context construction + // where there is better context to report it well. + for name := range vs { + if _, defined := vcs[name]; !defined { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Value assigned to undeclared variable", + fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name), + )) + } + } + + return diags } diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go index 1f43045782..61423c21f9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/version_required.go +++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go @@ -3,69 +3,60 @@ package terraform import ( "fmt" - "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/terraform/configs" tfversion "github.com/hashicorp/terraform/version" ) -// CheckRequiredVersion verifies that any version requirements specified by -// the configuration are met. -// -// This checks the root module as well as any additional version requirements -// from child modules. +// CheckCoreVersionRequirements visits each of the modules in the given +// configuration tree and verifies that any given Core version constraints +// match with the version of Terraform Core that is being used. // -// This is tested in context_test.go. 
-func CheckRequiredVersion(m *module.Tree) error { - // Check any children - for _, c := range m.Children() { - if err := CheckRequiredVersion(c); err != nil { - return err - } - } - - var tf *config.Terraform - if c := m.Config(); c != nil { - tf = c.Terraform - } - - // If there is no Terraform config or the required version isn't set, - // we move on. - if tf == nil || tf.RequiredVersion == "" { +// The returned diagnostics will contain errors if any constraints do not match. +// The returned diagnostics might also return warnings, which should be +// displayed to the user. +func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { + if config == nil { return nil } - // Path for errors - module := "root" - if path := normalizeModulePath(m.Path()); len(path) > 1 { - module = modulePrefixStr(path) - } - - // Check this version requirement of this module - cs, err := version.NewConstraint(tf.RequiredVersion) - if err != nil { - return fmt.Errorf( - "%s: terraform.required_version %q syntax error: %s", - module, - tf.RequiredVersion, err) + var diags tfdiags.Diagnostics + module := config.Module + + for _, constraint := range module.CoreVersionConstraints { + if !constraint.Required.Check(tfversion.SemVer) { + switch { + case len(config.Path) == 0: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported Terraform Core version", + Detail: fmt.Sprintf( + "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + tfversion.String(), + ), + Subject: &constraint.DeclRange, + }) + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported Terraform Core version", + Detail: fmt.Sprintf( + "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. 
Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + config.Path, config.SourceAddr, tfversion.String(), + ), + Subject: &constraint.DeclRange, + }) + } + } } - if !cs.Check(tfversion.SemVer) { - return fmt.Errorf( - "The currently running version of Terraform doesn't meet the\n"+ - "version requirements explicitly specified by the configuration.\n"+ - "Please use the required version or update the configuration.\n"+ - "Note that version requirements are usually set for a reason, so\n"+ - "we recommend verifying with whoever set the version requirements\n"+ - "prior to making any manual changes.\n\n"+ - " Module: %s\n"+ - " Required version: %s\n"+ - " Current version: %s", - module, - tf.RequiredVersion, - tfversion.SemVer) + for _, c := range config.Children { + childDiags := CheckCoreVersionRequirements(c) + diags = diags.Append(childDiags) } - return nil + return diags } diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go index 4cfc528ef0..a802b5286e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go @@ -4,9 +4,9 @@ package terraform import "strconv" -const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" +const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval" -var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96} +var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95} func (i walkOperation) String() string { if i >= walkOperation(len(_walkOperation_index)-1) { diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go new file mode 100644 index 0000000000..8e41f46ed2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go @@ -0,0 +1,68 @@ +package tfdiags + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/zclconf/go-cty/cty" +) + +// FormatCtyPath is a helper function to produce a user-friendly string +// representation of a cty.Path. The result uses a syntax similar to the +// HCL expression language in the hope of it being familiar to users. +func FormatCtyPath(path cty.Path) string { + var buf bytes.Buffer + for _, step := range path { + switch ts := step.(type) { + case cty.GetAttrStep: + fmt.Fprintf(&buf, ".%s", ts.Name) + case cty.IndexStep: + buf.WriteByte('[') + key := ts.Key + keyTy := key.Type() + switch { + case key.IsNull(): + buf.WriteString("null") + case !key.IsKnown(): + buf.WriteString("(not yet known)") + case keyTy == cty.Number: + bf := key.AsBigFloat() + buf.WriteString(bf.Text('g', -1)) + case keyTy == cty.String: + buf.WriteString(strconv.Quote(key.AsString())) + default: + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +// FormatError is a helper function to produce a user-friendly string +// representation of certain special error types that we might want to +// include in diagnostic messages. +// +// This currently has special behavior only for cty.PathError, where a +// non-empty path is rendered in a HCL-like syntax as context. 
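
FormatError's special case exists because a bare cty.PathError renders only its message, dropping the attribute context. Constructing one directly shows what would otherwise be lost (go-cty only; nothing here depends on the tfdiags package itself):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A path equivalent to .tags["Name"], the shape FormatError receives
	// wrapped inside a cty.PathError.
	path := cty.Path{
		cty.GetAttrStep{Name: "tags"},
		cty.IndexStep{Key: cty.StringVal("Name")},
	}
	err := path.NewErrorf("a string is required")

	// The plain error text has no path in it; FormatError would render
	// this as something like: .tags["Name"]: a string is required
	fmt.Println(err.Error()) // a string is required
}
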
+func FormatError(err error) string {
+	perr, ok := err.(cty.PathError)
+	if !ok || len(perr.Path) == 0 {
+		return err.Error()
+	}
+
+	return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error())
+}
+
+// FormatErrorPrefixed is like FormatError except that it presents any path
+// information after the given prefix string, which is assumed to contain
+// an HCL syntax representation of the value that errors are relative to.
+func FormatErrorPrefixed(err error, prefix string) string {
+	perr, ok := err.(cty.PathError)
+	if !ok || len(perr.Path) == 0 {
+		return fmt.Sprintf("%s: %s", prefix, err.Error())
+	}
+
+	return fmt.Sprintf("%s%s: %s", prefix, FormatCtyPath(perr.Path), perr.Error())
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go
new file mode 100644
index 0000000000..25b2140372
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go
@@ -0,0 +1,372 @@
+package tfdiags
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// The "contextual" family of diagnostics are designed to allow separating
+// the detection of a problem from placing that problem in context. For
+// example, some code that is validating an object extracted from configuration
+// may not have access to the configuration that generated it, but can still
+// report problems within that object which the caller can then place in
+// context by calling InConfigBody on the returned diagnostics.
+//
+// When contextual diagnostics are used, the documentation for a method must
+// be very explicit about what context is implied for any diagnostics returned,
+// to help ensure the expected result.
+
+// contextualFromConfigBody is an interface type implemented by diagnostic types
+// that can elaborate themselves when given information about the configuration
+// body they are embedded in.
+//
+// Usually this entails extracting source location information in order to
+// populate the "Subject" range.
+type contextualFromConfigBody interface {
+	ElaborateFromConfigBody(hcl.Body) Diagnostic
+}
+
+// InConfigBody returns a copy of the receiver with any config-contextual
+// diagnostics elaborated in the context of the given body.
+func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics {
+	if len(d) == 0 {
+		return nil
+	}
+
+	ret := make(Diagnostics, len(d))
+	for i, srcDiag := range d {
+		if cd, isCD := srcDiag.(contextualFromConfigBody); isCD {
+			ret[i] = cd.ElaborateFromConfigBody(body)
+		} else {
+			ret[i] = srcDiag
+		}
+	}
+
+	return ret
+}
+
+// AttributeValue returns a diagnostic about an attribute value in an implied current
+// configuration context. This should be returned only from functions whose
+// interface specifies a clear configuration context that this will be
+// resolved in.
+//
+// The given path is relative to the implied configuration context. To describe
+// a top-level attribute, it should be a single-element cty.Path with a
+// cty.GetAttrStep. It's assumed that the path is returning into a structure
+// that would be produced by our conventions in the configschema package; it
+// may return unexpected results for structures that can't be represented by
+// configschema.
+//
+// Since mapping attribute paths back onto configuration is an imprecise
+// operation (e.g.
dynamic block generation may cause the same block to be +// evaluated multiple times) the diagnostic detail should include the attribute +// name and other context required to help the user understand what is being +// referenced in case the identified source range is not unique. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is the value assigned to the +// named attribute, or the containing body's "missing item range" if no +// value is present. +func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { + return &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + attrPath: attrPath, + } +} + +// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains +// one. Normally this is not accessed directly, and instead the config body is +// added to the Diagnostic to create a more complete message for the user. In +// some cases however, we may want to know just the name of the attribute that +// generated the Diagnostic message. +// This returns a nil cty.Path if it does not exist in the Diagnostic. +func GetAttribute(d Diagnostic) cty.Path { + if d, ok := d.(*attributeDiagnostic); ok { + return d.attrPath + } + return nil +} + +type attributeDiagnostic struct { + diagnosticBase + attrPath cty.Path + subject *SourceRange // populated only after ElaborateFromConfigBody +} + +// ElaborateFromConfigBody finds the most accurate possible source location +// for a diagnostic's attribute path within the given body. +// +// Backing out from a path back to a source location is not always entirely +// possible because we lose some information in the decoding process, so +// if an exact position cannot be found then the returned diagnostic will +// refer to a position somewhere within the containing body, which is assumed +// to be better than no location at all. +// +// If possible it is generally better to report an error at a layer where +// source location information is still available, for more accuracy. This +// is not always possible due to system architecture, so this serves as a +// "best effort" fallback behavior for such situations. +func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { + if len(d.attrPath) < 1 { + // Should never happen, but we'll allow it rather than crashing. + return d + } + + if d.subject != nil { + // Don't modify an already-elaborated diagnostic. + return d + } + + ret := *d + + // This function will often end up re-decoding values that were already + // decoded by an earlier step. This is non-ideal but is architecturally + // more convenient than arranging for source location information to be + // propagated to every place in Terraform, and this happens only in the + // presence of errors where performance isn't a concern. 
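
The two-phase shape described above (create a diagnostic without a subject now, attach source context later) reduces to a small pattern. A toy version, with strings standing in for hcl.Body and SourceRange:

package main

import "fmt"

// diag is a toy contextual diagnostic: born without a subject and
// elaborated later, once the caller finally has the config body in hand.
type diag struct {
	summary string
	subject string // stands in for a source range
}

func (d diag) elaborateFromBody(body string) diag {
	if d.subject != "" {
		return d // already elaborated; leave untouched
	}
	d.subject = body
	return d
}

// inBody mirrors the role of Diagnostics.InConfigBody: apply context to
// every diagnostic in the list that still lacks it.
func inBody(diags []diag, body string) []diag {
	out := make([]diag, len(diags))
	for i, d := range diags {
		out[i] = d.elaborateFromBody(body)
	}
	return out
}

func main() {
	diags := []diag{{summary: "unsuitable value for tags"}}
	diags = inBody(diags, "main.tf:12,3")
	fmt.Printf("%s (at %s)\n", diags[0].summary, diags[0].subject)
}
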
+ + traverse := d.attrPath[:] + final := d.attrPath[len(d.attrPath)-1] + + // Index should never be the first step + // as indexing of top blocks (such as resources & data sources) + // is handled elsewhere + if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep { + subject := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &subject + return &ret + } + + // Process index separately + idxStep, hasIdx := final.(cty.IndexStep) + if hasIdx { + final = d.attrPath[len(d.attrPath)-2] + traverse = d.attrPath[:len(d.attrPath)-1] + } + + // If we have more than one step after removing index + // then we'll first try to traverse to a child body + // corresponding to the requested path. + if len(traverse) > 1 { + body = traversePathSteps(traverse, body) + } + + // Default is to indicate a missing item in the deepest body we reached + // while traversing. + subject := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &subject + + // Once we get here, "final" should be a GetAttr step that maps to an + // attribute in our current body. + finalStep, isAttr := final.(cty.GetAttrStep) + if !isAttr { + return &ret + } + + content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: finalStep.Name, + Required: true, + }, + }, + }) + if contentDiags.HasErrors() { + return &ret + } + + if attr, ok := content.Attributes[finalStep.Name]; ok { + hclRange := attr.Expr.Range() + if hasIdx { + // Try to be more precise by finding index range + hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr) + } + subject = SourceRangeFromHCL(hclRange) + ret.subject = &subject + } + + return &ret +} + +func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body { + for i := 0; i < len(traverse); i++ { + step := traverse[i] + + switch tStep := step.(type) { + case cty.GetAttrStep: + + var next cty.PathStep + if i < (len(traverse) - 1) { + next = traverse[i+1] + } + + // Will be indexing into our result here? + var indexType cty.Type + var indexVal cty.Value + if nextIndex, ok := next.(cty.IndexStep); ok { + indexVal = nextIndex.Key + indexType = indexVal.Type() + i++ // skip over the index on subsequent iterations + } + + var blockLabelNames []string + if indexType == cty.String { + // Map traversal means we expect one label for the key. + blockLabelNames = []string{"key"} + } + + // For intermediate steps we expect to be referring to a child + // block, so we'll attempt decoding under that assumption. 
+ content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: tStep.Name, + LabelNames: blockLabelNames, + }, + }, + }) + if contentDiags.HasErrors() { + return body + } + filtered := make([]*hcl.Block, 0, len(content.Blocks)) + for _, block := range content.Blocks { + if block.Type == tStep.Name { + filtered = append(filtered, block) + } + } + if len(filtered) == 0 { + // Step doesn't refer to a block + continue + } + + switch indexType { + case cty.NilType: // no index at all + if len(filtered) != 1 { + return body + } + body = filtered[0].Body + case cty.Number: + var idx int + err := gocty.FromCtyValue(indexVal, &idx) + if err != nil || idx >= len(filtered) { + return body + } + body = filtered[idx].Body + case cty.String: + key := indexVal.AsString() + var block *hcl.Block + for _, candidate := range filtered { + if candidate.Labels[0] == key { + block = candidate + break + } + } + if block == nil { + // No block with this key, so we'll just indicate a + // missing item in the containing block. + return body + } + body = block.Body + default: + // Should never happen, because only string and numeric indices + // are supported by cty collections. + return body + } + + default: + // For any other kind of step, we'll just return our current body + // as the subject and accept that this is a little inaccurate. + return body + } + } + return body +} + +func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range { + switch idxStep.Key.Type() { + case cty.Number: + var idx int + err := gocty.FromCtyValue(idxStep.Key, &idx) + items, diags := hcl.ExprList(attr.Expr) + if diags.HasErrors() { + return attr.Expr.Range() + } + if err != nil || idx >= len(items) { + return attr.NameRange + } + return items[idx].Range() + case cty.String: + pairs, diags := hcl.ExprMap(attr.Expr) + if diags.HasErrors() { + return attr.Expr.Range() + } + stepKey := idxStep.Key.AsString() + for _, kvPair := range pairs { + key, err := kvPair.Key.Value(nil) + if err != nil { + return attr.Expr.Range() + } + if key.AsString() == stepKey { + startRng := kvPair.Value.StartRange() + return startRng + } + } + return attr.NameRange + } + return attr.Expr.Range() +} + +func (d *attributeDiagnostic) Source() Source { + return Source{ + Subject: d.subject, + } +} + +// WholeContainingBody returns a diagnostic about the body that is an implied +// current configuration context. This should be returned only from +// functions whose interface specifies a clear configuration context that this +// will be resolved in. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is currently the missing item +// range of the body. In future, this may change to some other suitable +// part of the containing body. +func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { + return &wholeBodyDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + } +} + +type wholeBodyDiagnostic struct { + diagnosticBase + subject *SourceRange // populated only after ElaborateFromConfigBody +} + +func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { + if d.subject != nil { + // Don't modify an already-elaborated diagnostic. 
+		return d
+	}
+
+	ret := *d
+	rng := SourceRangeFromHCL(body.MissingItemRange())
+	ret.subject = &rng
+	return &ret
+}
+
+func (d *wholeBodyDiagnostic) Source() Source {
+	return Source{
+		Subject: d.subject,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
index 2c23f76ae5..c91ba9a529 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
@@ -1,9 +1,18 @@
 package tfdiags
 
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
 type Diagnostic interface {
 	Severity() Severity
 	Description() Description
 	Source() Source
+
+	// FromExpr returns the expression-related context for the diagnostic, if
+	// available. Returns nil if the diagnostic is not related to an
+	// expression evaluation.
+	FromExpr() *FromExpr
 }
 
 type Severity rune
@@ -24,3 +33,8 @@ type Source struct {
 	Subject *SourceRange
 	Context *SourceRange
 }
+
+type FromExpr struct {
+	Expression  hcl.Expression
+	EvalContext *hcl.EvalContext
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go
new file mode 100644
index 0000000000..50bf9d8eba
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go
@@ -0,0 +1,31 @@
+package tfdiags
+
+// diagnosticBase can be embedded in other diagnostic structs to get
+// default implementations of Severity and Description. This type also
+// has default implementations of Source and FromExpr that return no source
+// location or expression-related information, so embedders should generally
+// override those methods to return more useful results where possible.
+type diagnosticBase struct {
+	severity Severity
+	summary  string
+	detail   string
+}
+
+func (d diagnosticBase) Severity() Severity {
+	return d.severity
+}
+
+func (d diagnosticBase) Description() Description {
+	return Description{
+		Summary: d.summary,
+		Detail:  d.detail,
+	}
+}
+
+func (d diagnosticBase) Source() Source {
+	return Source{}
+}
+
+func (d diagnosticBase) FromExpr() *FromExpr {
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
index 667ba8096d..465b230f60 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
@@ -3,6 +3,9 @@ package tfdiags
 import (
 	"bytes"
 	"fmt"
+	"path/filepath"
+	"sort"
+	"strings"
 
 	"github.com/hashicorp/errwrap"
 	multierror "github.com/hashicorp/go-multierror"
@@ -54,6 +57,8 @@ func (diags Diagnostics) Append(new ...interface{}) Diagnostics {
 			diags = append(diags, ti...) // flatten
 		case diagnosticsAsError:
 			diags = diags.Append(ti.Diagnostics) // unwrap
+		case NonFatalError:
+			diags = diags.Append(ti.Diagnostics) // unwrap
 		case hcl.Diagnostics:
 			for _, hclDiag := range ti {
 				diags = append(diags, hclDiagnostic{hclDiag})
@@ -136,6 +141,54 @@ func (diags Diagnostics) Err() error {
 	return diagnosticsAsError{diags}
 }
 
+// ErrWithWarnings is similar to Err except that it will also return a non-nil
+// error if the receiver contains only warnings.
+//
+// In the warnings-only situation, the result is guaranteed to be of dynamic
+// type NonFatalError, allowing diagnostics-aware callers to type-assert
+// and unwrap it, treating it as non-fatal.
+// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) ErrWithWarnings() error { + if len(diags) == 0 { + return nil + } + if diags.HasErrors() { + return diags.Err() + } + return NonFatalError{diags} +} + +// NonFatalErr is similar to Err except that it always returns either nil +// (if there are no diagnostics at all) or NonFatalError. +// +// This allows diagnostics to be returned over an error return channel while +// being explicit that the diagnostics should not halt processing. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) NonFatalErr() error { + if len(diags) == 0 { + return nil + } + return NonFatalError{diags} +} + +// Sort applies an ordering to the diagnostics in the receiver in-place. +// +// The ordering is: warnings before errors, sourceless before sourced, +// short source paths before long source paths, and then ordering by +// position within each file. +// +// Diagnostics that do not differ by any of these sortable characteristics +// will remain in the same relative order after this method returns. +func (diags Diagnostics) Sort() { + sort.Stable(sortDiagnostics(diags)) +} + type diagnosticsAsError struct { Diagnostics } @@ -179,3 +232,99 @@ func (dae diagnosticsAsError) WrappedErrors() []error { } return errs } + +// NonFatalError is a special error type, returned by +// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, +// that indicates that the wrapped diagnostics should be treated as non-fatal. +// Callers can conditionally type-assert an error to this type in order to +// detect the non-fatal scenario and handle it in a different way. +type NonFatalError struct { + Diagnostics +} + +func (woe NonFatalError) Error() string { + diags := woe.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. 
+ return "no errors or warnings" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + if diags.HasErrors() { + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + } else { + fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) + } + for _, diag := range woe.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// sortDiagnostics is an implementation of sort.Interface +type sortDiagnostics []Diagnostic + +var _ sort.Interface = sortDiagnostics(nil) + +func (sd sortDiagnostics) Len() int { + return len(sd) +} + +func (sd sortDiagnostics) Less(i, j int) bool { + iD, jD := sd[i], sd[j] + iSev, jSev := iD.Severity(), jD.Severity() + iSrc, jSrc := iD.Source(), jD.Source() + + switch { + + case iSev != jSev: + return iSev == Warning + + case (iSrc.Subject == nil) != (jSrc.Subject == nil): + return iSrc.Subject == nil + + case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject: + iSubj := iSrc.Subject + jSubj := jSrc.Subject + switch { + case iSubj.Filename != jSubj.Filename: + // Path with fewer segments goes first if they are different lengths + sep := string(filepath.Separator) + iCount := strings.Count(iSubj.Filename, sep) + jCount := strings.Count(jSubj.Filename, sep) + if iCount != jCount { + return iCount < jCount + } + return iSubj.Filename < jSubj.Filename + case iSubj.Start.Byte != jSubj.Start.Byte: + return iSubj.Start.Byte < jSubj.Start.Byte + case iSubj.End.Byte != jSubj.End.Byte: + return iSubj.End.Byte < jSubj.End.Byte + } + fallthrough + + default: + // The remaining properties do not have a defined ordering, so + // we'll leave it unspecified. Since we use sort.Stable in + // the caller of this, the ordering of remaining items will + // be preserved. + return false + } +} + +func (sd sortDiagnostics) Swap(i, j int) { + sd[i], sd[j] = sd[j], sd[i] +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/error.go b/vendor/github.com/hashicorp/terraform/tfdiags/error.go index 35edc3041b..13f7a714f4 100644 --- a/vendor/github.com/hashicorp/terraform/tfdiags/error.go +++ b/vendor/github.com/hashicorp/terraform/tfdiags/error.go @@ -13,7 +13,7 @@ func (e nativeError) Severity() Severity { func (e nativeError) Description() Description { return Description{ - Summary: e.err.Error(), + Summary: FormatError(e.err), } } @@ -21,3 +21,8 @@ func (e nativeError) Source() Source { // No source information available for a native error return Source{} } + +func (e nativeError) FromExpr() *FromExpr { + // Native errors are not expression-related + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go index 24851f4d00..f9aec41c93 100644 --- a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go +++ b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go @@ -40,6 +40,16 @@ func (d hclDiagnostic) Source() Source { return ret } +func (d hclDiagnostic) FromExpr() *FromExpr { + if d.diag.Expression == nil || d.diag.EvalContext == nil { + return nil + } + return &FromExpr{ + Expression: d.diag.Expression, + EvalContext: d.diag.EvalContext, + } +} + // SourceRangeFromHCL constructs a SourceRange from the corresponding range // type within the HCL package. 
func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go index 6cc95cc2ed..485063b0c0 100644 --- a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go +++ b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go @@ -48,6 +48,12 @@ func (d *rpcFriendlyDiag) Source() Source { } } +func (d rpcFriendlyDiag) FromExpr() *FromExpr { + // RPC-friendly diagnostics cannot preserve expression information because + // expressions themselves are not RPC-friendly. + return nil +} + func init() { gob.Register((*rpcFriendlyDiag)(nil)) } diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go index fb3ac98989..b0f1ecd46c 100644 --- a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go +++ b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go @@ -20,6 +20,11 @@ func (e simpleWarning) Description() Description { } func (e simpleWarning) Source() Source { - // No source information available for a native error + // No source information available for a simple warning return Source{} } + +func (e simpleWarning) FromExpr() *FromExpr { + // Simple warnings are not expression-related + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go b/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go new file mode 100644 index 0000000000..eaa27373db --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go @@ -0,0 +1,13 @@ +package tfdiags + +// Sourceless creates and returns a diagnostic with no source location +// information. This is generally used for operational-type errors that are +// caused by or relate to the environment where Terraform is running rather +// than to the provided configuration. +func Sourceless(severity Severity, summary, detail string) Diagnostic { + return diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + } +} diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go index 5ae1be22ef..e6cc16506d 100644 --- a/vendor/github.com/hashicorp/terraform/version/version.go +++ b/vendor/github.com/hashicorp/terraform/version/version.go @@ -11,17 +11,21 @@ import ( ) // The main version number that is being run at the moment. -const Version = "0.11.9" +var Version = "0.12.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -var Prerelease = "" +var Prerelease = "dev" // SemVer is an instance of version.Version. This has the secondary // benefit of verifying during tests and init time that our version is a // proper semantic version, which should always be the case. -var SemVer = version.Must(version.NewVersion(Version)) +var SemVer *version.Version + +func init() { + SemVer = version.Must(version.NewVersion(Version)) +} // Header is the header name used to send the current terraform version // in http requests. 
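
Turning Version into a var (and deferring the SemVer parse to an init function) is what permits overriding the version string at build time; a const cannot be rewritten by the linker. A minimal sketch of that pattern, with hypothetical names:

package main

import "fmt"

// version must be a package-level string var, not a const, so the Go
// linker can overwrite it at build time, e.g.:
//
//	go build -ldflags "-X main.version=0.12.0-rc1"
var version = "0.12.0-dev"

func main() {
	fmt.Println("running version", version)
}
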
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go index e17981839f..32ba02e023 100644 --- a/vendor/github.com/hashicorp/yamux/session.go +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -123,6 +123,12 @@ func (s *Session) IsClosed() bool { } } +// CloseChan returns a read-only channel which is closed as +// soon as the session is closed. +func (s *Session) CloseChan() <-chan struct{} { + return s.shutdownCh +} + // NumStreams returns the number of currently open streams func (s *Session) NumStreams() int { s.streamLock.Lock() @@ -303,8 +309,10 @@ func (s *Session) keepalive() { case <-time.After(s.config.KeepAliveInterval): _, err := s.Ping() if err != nil { - s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) - s.exitErr(ErrKeepAliveTimeout) + if err != ErrSessionShutdown { + s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) + s.exitErr(ErrKeepAliveTimeout) + } return } case <-s.shutdownCh: @@ -323,8 +331,17 @@ func (s *Session) waitForSend(hdr header, body io.Reader) error { // potential shutdown. Since there's the expectation that sends can happen // in a timely manner, we enforce the connection write timeout here. func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { - timer := time.NewTimer(s.config.ConnectionWriteTimeout) - defer timer.Stop() + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() ready := sendReady{Hdr: hdr, Body: body, Err: errCh} select { @@ -349,8 +366,17 @@ func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) e // the send happens right here, we enforce the connection write timeout if we // can't queue the header to be sent. 
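// Note on the pooled timer below: Stop does not drain a timer that has
// already fired, so the deferred cleanup empties timer.C before returning
// the timer to timerPool; otherwise a stale expiry could wake the pool's
// next user immediately after Reset.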
func (s *Session) sendNoWait(hdr header) error { - timer := time.NewTimer(s.config.ConnectionWriteTimeout) - defer timer.Stop() + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() select { case s.sendCh <- sendReady{Hdr: hdr}: @@ -408,11 +434,20 @@ func (s *Session) recv() { } } +// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + // recvLoop continues to receive data until a fatal error is encountered func (s *Session) recvLoop() error { defer close(s.recvDoneCh) hdr := header(make([]byte, headerSize)) - var handler func(header) error for { // Read the header if _, err := io.ReadFull(s.bufRead, hdr); err != nil { @@ -428,22 +463,12 @@ func (s *Session) recvLoop() error { return ErrInvalidVersion } - // Switch on the type - switch hdr.MsgType() { - case typeData: - handler = s.handleStreamMessage - case typeWindowUpdate: - handler = s.handleStreamMessage - case typeGoAway: - handler = s.handleGoAway - case typePing: - handler = s.handlePing - default: + mt := hdr.MsgType() + if mt < typeData || mt > typeGoAway { return ErrInvalidMsgType } - // Invoke the handler - if err := handler(hdr); err != nil { + if err := handlers[mt](s, hdr); err != nil { return err } } diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go index d216e281ca..aa23919739 100644 --- a/vendor/github.com/hashicorp/yamux/stream.go +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -47,8 +47,8 @@ type Stream struct { recvNotifyCh chan struct{} sendNotifyCh chan struct{} - readDeadline time.Time - writeDeadline time.Time + readDeadline atomic.Value // time.Time + writeDeadline atomic.Value // time.Time } // newStream is used to construct a new stream within @@ -67,6 +67,8 @@ func newStream(session *Session, id uint32, state streamState) *Stream { recvNotifyCh: make(chan struct{}, 1), sendNotifyCh: make(chan struct{}, 1), } + s.readDeadline.Store(time.Time{}) + s.writeDeadline.Store(time.Time{}) return s } @@ -122,8 +124,9 @@ START: WAIT: var timeout <-chan time.Time var timer *time.Timer - if !s.readDeadline.IsZero() { - delay := s.readDeadline.Sub(time.Now()) + readDeadline := s.readDeadline.Load().(time.Time) + if !readDeadline.IsZero() { + delay := readDeadline.Sub(time.Now()) timer = time.NewTimer(delay) timeout = timer.C } @@ -188,7 +191,7 @@ START: // Send the header s.sendHdr.encode(typeData, flags, s.id, max) - if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { return 0, err } @@ -200,8 +203,9 @@ START: WAIT: var timeout <-chan time.Time - if !s.writeDeadline.IsZero() { - delay := s.writeDeadline.Sub(time.Now()) + writeDeadline := s.writeDeadline.Load().(time.Time) + if !writeDeadline.IsZero() { + delay := writeDeadline.Sub(time.Now()) timeout = time.After(delay) } select { @@ -238,18 +242,25 @@ func (s *Stream) sendWindowUpdate() error { // Determine the delta update max := s.session.config.MaxStreamWindowSize - delta := max - atomic.LoadUint32(&s.recvWindow) + var bufLen uint32 + s.recvLock.Lock() + if s.recvBuf != nil { + bufLen = 
uint32(s.recvBuf.Len()) + } + delta := (max - bufLen) - s.recvWindow // Determine the flags if any flags := s.sendFlags() // Check if we can omit the update if delta < (max/2) && flags == 0 { + s.recvLock.Unlock() return nil } // Update our window - atomic.AddUint32(&s.recvWindow, delta) + s.recvWindow += delta + s.recvLock.Unlock() // Send the header s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) @@ -392,16 +403,18 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { if length == 0 { return nil } - if remain := atomic.LoadUint32(&s.recvWindow); length > remain { - s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length) - return ErrRecvWindowExceeded - } // Wrap in a limited reader conn = &io.LimitedReader{R: conn, N: int64(length)} // Copy into buffer s.recvLock.Lock() + + if length > s.recvWindow { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + return ErrRecvWindowExceeded + } + if s.recvBuf == nil { // Allocate the receive buffer just-in-time to fit the full data frame. // This way we can read in the whole packet without further allocations. @@ -414,7 +427,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { } // Decrement the receive window - atomic.AddUint32(&s.recvWindow, ^uint32(length-1)) + s.recvWindow -= length s.recvLock.Unlock() // Unblock any readers @@ -435,13 +448,13 @@ func (s *Stream) SetDeadline(t time.Time) error { // SetReadDeadline sets the deadline for future Read calls. func (s *Stream) SetReadDeadline(t time.Time) error { - s.readDeadline = t + s.readDeadline.Store(t) return nil } // SetWriteDeadline sets the deadline for future Write calls func (s *Stream) SetWriteDeadline(t time.Time) error { - s.writeDeadline = t + s.writeDeadline.Store(t) return nil } diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go index 5fe45afcdf..8a73e9249a 100644 --- a/vendor/github.com/hashicorp/yamux/util.go +++ b/vendor/github.com/hashicorp/yamux/util.go @@ -1,5 +1,20 @@ package yamux +import ( + "sync" + "time" +) + +var ( + timerPool = &sync.Pool{ + New: func() interface{} { + timer := time.NewTimer(time.Hour * 1e6) + timer.Stop() + return timer + }, + } +) + // asyncSendErr is used to try an async send of an error func asyncSendErr(ch chan error, err error) { if ch == nil { diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go index 9cfa988bc5..8e26ffeecf 100644 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -2,7 +2,7 @@ package jmespath import "strconv" -// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is // safe for concurrent use by multiple goroutines. 
type JMESPath struct { ast ASTNode diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index e17a5474e9..404e10ca02 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -29,6 +29,15 @@ const ( backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) ) +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + type wchar uint16 type short int16 type dword uint32 @@ -69,14 +78,17 @@ var ( procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") ) // Writer provide colorable Writer to the console type Writer struct { - out io.Writer - handle syscall.Handle - oldattr word - oldpos coord + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer } // NewColorable return new instance of Writer which handle escape sequence from File. @@ -407,7 +419,18 @@ func (w *Writer) Write(data []byte) (n int, err error) { var csbi consoleScreenBufferInfo procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - er := bytes.NewReader(data) + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } var bw [1]byte loop: for { @@ -425,29 +448,55 @@ loop: break loop } - if c2 == ']' { - if err := doTitleSequence(er); err != nil { + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { break loop } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() continue - } - if c2 != 0x5b { + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: continue } + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + var buf bytes.Buffer var m byte - for { - c, err := er.ReadByte() - if err != nil { - break loop - } + for i, c := range w.rest.Bytes()[2:] { if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() break } buf.Write([]byte(string(c))) } + if m == 0 { + break loop + } switch m { case 'A': @@ -455,61 +504,64 @@ loop: if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'B': n, err = strconv.Atoi(buf.String()) if err != nil { 
continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'C': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'D': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'E': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'F': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'G': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = short(n - 1) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'H', 'f': - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) if buf.Len() > 0 { token := strings.Split(buf.String(), ";") switch len(token) { @@ -534,7 +586,7 @@ loop: } else { csbi.cursorPosition.y = 0 } - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), 
*(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'J': n := 0 if buf.Len() > 0 { @@ -545,20 +597,20 @@ loop: } var count, written dword var cursor coord - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) switch n { case 0: cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) case 1: cursor = coord{x: csbi.window.left, y: csbi.window.top} - count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.window.top-csbi.cursorPosition.y)*csbi.size.x) + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) case 2: cursor = coord{x: csbi.window.left, y: csbi.window.top} - count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) } - procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'K': n := 0 if buf.Len() > 0 { @@ -567,28 +619,28 @@ loop: continue } } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) var cursor coord var count, written dword switch n { case 0: - cursor = coord{x: csbi.cursorPosition.x + 1, y: csbi.cursorPosition.y} - count = dword(csbi.size.x - csbi.cursorPosition.x - 1) + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) case 1: - cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} count = dword(csbi.size.x - csbi.cursorPosition.x) case 2: - cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} count = dword(csbi.size.x) } - procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'm': - 
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) attr := csbi.attributes cs := buf.String() if cs == "" { - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) continue } token := strings.Split(cs, ";") @@ -627,6 +679,21 @@ loop: attr |= n256foreAttr[n256] i += 2 } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } } else { attr = attr & (w.oldattr & backgroundMask) } @@ -654,6 +721,21 @@ loop: attr |= n256backAttr[n256] i += 2 } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } } else { attr = attr & (w.oldattr & foregroundMask) } @@ -685,38 +767,52 @@ loop: attr |= backgroundBlue } } - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) } } case 'h': var ci consoleCursorInfo cs := buf.String() if cs == "5>" { - procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) } else if cs == "?25" { - procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } } case 'l': var ci consoleCursorInfo cs := buf.String() if cs == "5>" { - procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) } else if cs == "?25" { - procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } } case 's': - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + 
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) w.oldpos = csbi.cursorPosition case 'u': - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) } } diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 0000000000..9d9f424854 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-colorable + +require github.com/mattn/go-isatty v0.0.5 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 0000000000..2c12960ec7 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,4 @@ +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml index b9f8b239c0..5597e026dd 100644 --- a/vendor/github.com/mattn/go-isatty/.travis.yml +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -2,6 +2,10 @@ language: go go: - tip +os: + - linux + - osx + before_install: - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 0000000000..f310320c33 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-isatty + +require golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 0000000000..426c8973c0 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go deleted file mode 100644 index 9584a98842..0000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_appengine.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build appengine - -package isatty - -// IsTerminal returns true if the file descriptor is terminal which -// is always false on on appengine classic which is a sandboxed PaaS. -func IsTerminal(fd uintptr) bool { - return false -} - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. 
-func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 42f2514d13..07e93039db 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -16,3 +16,9 @@ func IsTerminal(fd uintptr) bool { _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 } + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go index 7384cf9916..e004038ee7 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_linux.go +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -1,18 +1,18 @@ // +build linux -// +build !appengine,!ppc64,!ppc64le +// +build !appengine package isatty -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS +import "golang.org/x/sys/unix" // IsTerminal return true if the file descriptor is terminal. func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false } diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go deleted file mode 100644 index 44e5d21302..0000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build linux -// +build ppc64 ppc64le - -package isatty - -import ( - "unsafe" - - syscall "golang.org/x/sys/unix" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index ff4de3d9a5..f02849c56f 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,8 +1,13 @@ -// +build !windows -// +build !appengine +// +build appengine js package isatty +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + // IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 // terminal. This is also always false on this environment. 
func IsCygwinTerminal(fd uintptr) bool { diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go index 1f0c6bf53d..bdd5c79a07 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -14,3 +14,9 @@ func IsTerminal(fd uintptr) bool { err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) return err == nil } + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mitchellh/colorstring/.travis.yml b/vendor/github.com/mitchellh/colorstring/.travis.yml new file mode 100644 index 0000000000..74e286ae12 --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/colorstring/LICENSE b/vendor/github.com/mitchellh/colorstring/LICENSE new file mode 100644 index 0000000000..2298515904 --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/colorstring/README.md b/vendor/github.com/mitchellh/colorstring/README.md new file mode 100644 index 0000000000..0654d454de --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/README.md @@ -0,0 +1,30 @@ +# colorstring [![Build Status](https://travis-ci.org/mitchellh/colorstring.svg)](https://travis-ci.org/mitchellh/colorstring) + +colorstring is a [Go](http://www.golang.org) library for outputting colored +strings to a console using a simple inline syntax in your string to specify +the color to print as. + +For example, the string `[blue]hello [red]world` would output the text +"hello world" in two colors. The API of colorstring allows for easily disabling +colors, adding aliases, etc. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/colorstring +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/colorstring). + +Usage is easy enough: + +```go +colorstring.Println("[blue]Hello [red]World!") +``` + +Additionally, the `Colorize` struct can be used to set options such as +custom colors, color disabling, etc. 
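A brief sketch of that `Colorize` usage (illustrative only, not part of the upstream README); per the implementation in the accompanying colorstring.go, `Disable` strips recognized `[color]` tags instead of emitting ANSI codes, which is useful when stdout is not a terminal:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/colorstring"
)

func main() {
	// Default helper: emits ANSI escapes, "Hello" in blue, "World!" in red.
	colorstring.Println("[blue]Hello [red]World!")

	// Customized colorizer: Disable drops recognized [color] tags without
	// emitting escape codes; Reset would otherwise append "\033[0m".
	plain := colorstring.Colorize{
		Colors:  colorstring.DefaultColors,
		Disable: true,
		Reset:   true,
	}
	fmt.Println(plain.Color("[green]plain text")) // prints "plain text"
}
```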
diff --git a/vendor/github.com/mitchellh/colorstring/colorstring.go b/vendor/github.com/mitchellh/colorstring/colorstring.go new file mode 100644 index 0000000000..3de5b241d9 --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/colorstring.go @@ -0,0 +1,244 @@ +// colorstring provides functions for colorizing strings for terminal +// output. +package colorstring + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strings" +) + +// Color colorizes your strings using the default settings. +// +// Strings given to Color should use the syntax `[color]` to specify the +// color for text following. For example: `[blue]Hello` will return "Hello" +// in blue. See DefaultColors for all the supported colors and attributes. +// +// If an unrecognized color is given, it is ignored and assumed to be part +// of the string. For example: `[hi]world` will result in "[hi]world". +// +// A color reset is appended to the end of every string. This will reset +// the color of following strings when you output this text to the same +// terminal session. +// +// If you want to customize any of this behavior, use the Colorize struct. +func Color(v string) string { + return def.Color(v) +} + +// ColorPrefix returns the color sequence that prefixes the given text. +// +// This is useful when wrapping text if you want to inherit the color +// of the wrapped text. For example, "[green]foo" will return "[green]". +// If there is no color sequence, then this will return "". +func ColorPrefix(v string) string { + return def.ColorPrefix(v) +} + +// Colorize colorizes your strings, giving you the ability to customize +// some of the colorization process. +// +// The options in Colorize can be set to customize colorization. If you're +// only interested in the defaults, just use the top Color function directly, +// which creates a default Colorize. +type Colorize struct { + // Colors maps a color string to the code for that color. The code + // is a string so that you can use more complex colors to set foreground, + // background, attributes, etc. For example, "boldblue" might be + // "1;34" + Colors map[string]string + + // If true, color attributes will be ignored. This is useful if you're + // outputting to a location that doesn't support colors and you just + // want the strings returned. + Disable bool + + // Reset, if true, will reset the color after each colorization by + // adding a reset code at the end. + Reset bool +} + +// Color colorizes a string according to the settings setup in the struct. +// +// For more details on the syntax, see the top-level Color function. +func (c *Colorize) Color(v string) string { + matches := parseRe.FindAllStringIndex(v, -1) + if len(matches) == 0 { + return v + } + + result := new(bytes.Buffer) + colored := false + m := []int{0, 0} + for _, nm := range matches { + // Write the text in between this match and the last + result.WriteString(v[m[1]:nm[0]]) + m = nm + + var replace string + if code, ok := c.Colors[v[m[0]+1:m[1]-1]]; ok { + colored = true + + if !c.Disable { + replace = fmt.Sprintf("\033[%sm", code) + } + } else { + replace = v[m[0]:m[1]] + } + + result.WriteString(replace) + } + result.WriteString(v[m[1]:]) + + if colored && c.Reset && !c.Disable { + // Write the clear byte at the end + result.WriteString("\033[0m") + } + + return result.String() +} + +// ColorPrefix returns the first color sequence that exists in this string. +// +// For example: "[green]foo" would return "[green]". If no color sequence +// exists, then "" is returned. 
This is especially useful when wrapping +// colored texts to inherit the color of the wrapped text. +func (c *Colorize) ColorPrefix(v string) string { + return prefixRe.FindString(strings.TrimSpace(v)) +} + +// DefaultColors are the default colors used when colorizing. +// +// If the color is surrounded in underscores, such as "_blue_", then that +// color will be used for the background color. +var DefaultColors map[string]string + +func init() { + DefaultColors = map[string]string{ + // Default foreground/background colors + "default": "39", + "_default_": "49", + + // Foreground colors + "black": "30", + "red": "31", + "green": "32", + "yellow": "33", + "blue": "34", + "magenta": "35", + "cyan": "36", + "light_gray": "37", + "dark_gray": "90", + "light_red": "91", + "light_green": "92", + "light_yellow": "93", + "light_blue": "94", + "light_magenta": "95", + "light_cyan": "96", + "white": "97", + + // Background colors + "_black_": "40", + "_red_": "41", + "_green_": "42", + "_yellow_": "43", + "_blue_": "44", + "_magenta_": "45", + "_cyan_": "46", + "_light_gray_": "47", + "_dark_gray_": "100", + "_light_red_": "101", + "_light_green_": "102", + "_light_yellow_": "103", + "_light_blue_": "104", + "_light_magenta_": "105", + "_light_cyan_": "106", + "_white_": "107", + + // Attributes + "bold": "1", + "dim": "2", + "underline": "4", + "blink_slow": "5", + "blink_fast": "6", + "invert": "7", + "hidden": "8", + + // Reset to reset everything to their defaults + "reset": "0", + "reset_bold": "21", + } + + def = Colorize{ + Colors: DefaultColors, + Reset: true, + } +} + +var def Colorize +var parseReRaw = `\[[a-z0-9_-]+\]` +var parseRe = regexp.MustCompile(`(?i)` + parseReRaw) +var prefixRe = regexp.MustCompile(`^(?i)(` + parseReRaw + `)+`) + +// Print is a convenience wrapper for fmt.Print with support for color codes. +// +// Print formats using the default formats for its operands and writes to +// standard output with support for color codes. Spaces are added between +// operands when neither is a string. It returns the number of bytes written +// and any write error encountered. +func Print(a string) (n int, err error) { + return fmt.Print(Color(a)) +} + +// Println is a convenience wrapper for fmt.Println with support for color +// codes. +// +// Println formats using the default formats for its operands and writes to +// standard output with support for color codes. Spaces are always added +// between operands and a newline is appended. It returns the number of bytes +// written and any write error encountered. +func Println(a string) (n int, err error) { + return fmt.Println(Color(a)) +} + +// Printf is a convenience wrapper for fmt.Printf with support for color codes. +// +// Printf formats according to a format specifier and writes to standard output +// with support for color codes. It returns the number of bytes written and any +// write error encountered. +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(Color(format), a...) +} + +// Fprint is a convenience wrapper for fmt.Fprint with support for color codes. +// +// Fprint formats using the default formats for its operands and writes to w +// with support for color codes. Spaces are added between operands when neither +// is a string. It returns the number of bytes written and any write error +// encountered. +func Fprint(w io.Writer, a string) (n int, err error) { + return fmt.Fprint(w, Color(a)) +} + +// Fprintln is a convenience wrapper for fmt.Fprintln with support for color +// codes. 
+// +// Fprintln formats using the default formats for its operands and writes to w +// with support for color codes. Spaces are always added between operands and a +// newline is appended. It returns the number of bytes written and any write +// error encountered. +func Fprintln(w io.Writer, a string) (n int, err error) { + return fmt.Fprintln(w, Color(a)) +} + +// Fprintf is a convenience wrapper for fmt.Fprintf with support for color +// codes. +// +// Fprintf formats according to a format specifier and writes to w with support +// for color codes. It returns the number of bytes written and any write error +// encountered. +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, Color(format), a...) +} diff --git a/vendor/github.com/mitchellh/colorstring/go.mod b/vendor/github.com/mitchellh/colorstring/go.mod new file mode 100644 index 0000000000..446ff8d307 --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/colorstring diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore new file mode 100644 index 0000000000..a1338d6851 --- /dev/null +++ b/vendor/github.com/oklog/run/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml new file mode 100644 index 0000000000..362bdd41c0 --- /dev/null +++ b/vendor/github.com/oklog/run/.travis.yml @@ -0,0 +1,12 @@ +language: go +sudo: false +go: + - 1.x + - tip +install: + - go get -v github.com/golang/lint/golint + - go build ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md new file mode 100644 index 0000000000..a7228cd9a3 --- /dev/null +++ b/vendor/github.com/oklog/run/README.md @@ -0,0 +1,73 @@ +# run + +[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) +[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) +[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) +[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) + +run.Group is a universal mechanism to manage goroutine lifecycles. + +Create a zero-value run.Group, and then add actors to it. 
Actors are defined as +a pair of functions: an **execute** function, which should run synchronously; +and an **interrupt** function, which, when invoked, should cause the execute +function to return. Finally, invoke Run, which blocks until the first actor +returns. This general-purpose API allows callers to model pretty much any +runnable task, and achieve well-defined lifecycle semantics for the group. + +run.Group was written to manage component lifecycles in func main for +[OK Log](https://github.com/oklog/oklog). +But it's useful in any circumstance where you need to orchestrate multiple +goroutines as a unit whole. +[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a +video of a talk where run.Group is described. + +## Examples + +### context.Context + +```go +ctx, cancel := context.WithCancel(context.Background()) +g.Add(func() error { + return myProcess(ctx, ...) +}, func(error) { + cancel() +}) +``` + +### net.Listener + +```go +ln, _ := net.Listen("tcp", ":8080") +g.Add(func() error { + return http.Serve(ln, nil) +}, func(error) { + ln.Close() +}) +``` + +### io.ReadCloser + +```go +var conn io.ReadCloser = ... +g.Add(func() error { + s := bufio.NewScanner(conn) + for s.Scan() { + println(s.Text()) + } + return s.Err() +}, func(error) { + conn.Close() +}) +``` + +## Comparisons + +Package run is somewhat similar to package +[errgroup](https://godoc.org/golang.org/x/sync/errgroup), +except it doesn't require actor goroutines to understand context semantics. + +It's somewhat similar to package +[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or +[tomb.v2](https://godoc.org/gopkg.in/tomb.v2), +except it has a much smaller API surface, delegating e.g. staged shutdown of +goroutines to the caller. diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go new file mode 100644 index 0000000000..832d47dd16 --- /dev/null +++ b/vendor/github.com/oklog/run/group.go @@ -0,0 +1,62 @@ +// Package run implements an actor-runner with deterministic teardown. It is +// somewhat similar to package errgroup, except it does not require actor +// goroutines to understand context semantics. This makes it suitable for use in +// more circumstances; for example, goroutines which are handling connections +// from net.Listeners, or scanning input from a closable io.Reader. +package run + +// Group collects actors (functions) and runs them concurrently. +// When one actor (function) returns, all actors are interrupted. +// The zero value of a Group is useful. +type Group struct { + actors []actor +} + +// Add an actor (function) to the group. Each actor must be pre-emptable by an +// interrupt function. That is, if interrupt is invoked, execute should return. +// Also, it must be safe to call interrupt even after execute has returned. +// +// The first actor (function) to return interrupts all running actors. +// The error is passed to the interrupt functions, and is returned by Run. +func (g *Group) Add(execute func() error, interrupt func(error)) { + g.actors = append(g.actors, actor{execute, interrupt}) +} + +// Run all actors (functions) concurrently. +// When the first actor returns, all others are interrupted. +// Run only returns when all actors have exited. +// Run returns the error returned by the first exiting actor. +func (g *Group) Run() error { + if len(g.actors) == 0 { + return nil + } + + // Run each actor. 
+ errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. + return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/vendor/github.com/posener/complete/.gitignore b/vendor/github.com/posener/complete/.gitignore index 1363720813..293955f99a 100644 --- a/vendor/github.com/posener/complete/.gitignore +++ b/vendor/github.com/posener/complete/.gitignore @@ -1,2 +1,4 @@ .idea coverage.txt +gocomplete/gocomplete +example/self/self diff --git a/vendor/github.com/posener/complete/.travis.yml b/vendor/github.com/posener/complete/.travis.yml index c2798f8f37..2fae945419 100644 --- a/vendor/github.com/posener/complete/.travis.yml +++ b/vendor/github.com/posener/complete/.travis.yml @@ -1,17 +1,16 @@ language: go sudo: false go: + - 1.11 + - 1.10.x - 1.9 - 1.8 before_install: - go get -u -t ./... - - go get -u gopkg.in/alecthomas/gometalinter.v1 - - gometalinter.v1 --install script: - - gometalinter.v1 --config metalinter.json ./... - - ./test.sh + - GO111MODULE=on ./test.sh after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go index 1ba4d69198..17ab2c6d62 100644 --- a/vendor/github.com/posener/complete/args.go +++ b/vendor/github.com/posener/complete/args.go @@ -57,11 +57,20 @@ func newArgs(line string) Args { } } +// splitFields returns a list of fields from the given command line. +// If the last character is space, it appends an empty field in the end +// indicating that the field before it was completed. +// If the last field is of the form "a=b", it splits it to two fields: "a", "b", +// So it can be completed. func splitFields(line string) []string { parts := strings.Fields(line) + + // Add empty field if the last field was completed. 
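+	// (strings.Fields above already discarded the trailing whitespace, so
+	// the empty sentinel field must be appended explicitly here.)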
if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) { parts = append(parts, "") } + + // Treat the last field if it is of the form "a=b" parts = splitLastEqual(parts) return parts } diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go index 7137dee178..b99fe52901 100644 --- a/vendor/github.com/posener/complete/cmd/cmd.go +++ b/vendor/github.com/posener/complete/cmd/cmd.go @@ -103,7 +103,7 @@ func (f *CLI) AddFlags(flags *flag.FlagSet) { fmt.Sprintf("Uninstall completion for %s command", f.Name)) } if flags.Lookup("y") == nil { - flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes'") + flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes' when installing completion") } } diff --git a/vendor/github.com/posener/complete/cmd/install/fish.go b/vendor/github.com/posener/complete/cmd/install/fish.go index f04e7c3ac6..6467196bc0 100644 --- a/vendor/github.com/posener/complete/cmd/install/fish.go +++ b/vendor/github.com/posener/complete/cmd/install/fish.go @@ -16,7 +16,10 @@ type fish struct { func (f fish) Install(cmd, bin string) error { completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) - completeCmd := f.cmd(cmd, bin) + completeCmd, err := f.cmd(cmd, bin) + if err != nil { + return err + } if _, err := os.Stat(completionFile); err == nil { return fmt.Errorf("already installed at %s", completionFile) } @@ -33,10 +36,10 @@ func (f fish) Uninstall(cmd, bin string) error { return os.Remove(completionFile) } -func (f fish) cmd(cmd, bin string) string { +func (f fish) cmd(cmd, bin string) (string, error) { var buf bytes.Buffer params := struct{ Cmd, Bin string }{cmd, bin} - template.Must(template.New("cmd").Parse(` + tmpl := template.Must(template.New("cmd").Parse(` function __complete_{{.Cmd}} set -lx COMP_LINE (string join ' ' (commandline -o)) test (commandline -ct) = "" @@ -44,7 +47,10 @@ function __complete_{{.Cmd}} {{.Bin}} end complete -c {{.Cmd}} -a "(__complete_{{.Cmd}})" -`)).Execute(&buf, params) - - return string(buf.Bytes()) +`)) + err := tmpl.Execute(&buf, params) + if err != nil { + return "", err + } + return buf.String(), nil } diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go index 4a706242f4..dfa1963b89 100644 --- a/vendor/github.com/posener/complete/cmd/install/install.go +++ b/vendor/github.com/posener/complete/cmd/install/install.go @@ -51,7 +51,7 @@ func Uninstall(cmd string) error { for _, i := range is { errI := i.Uninstall(cmd, bin) if errI != nil { - multierror.Append(err, errI) + err = multierror.Append(err, errI) } } diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go index bb709bc6cd..d34ac8cae8 100644 --- a/vendor/github.com/posener/complete/cmd/install/utils.go +++ b/vendor/github.com/posener/complete/cmd/install/utils.go @@ -115,7 +115,10 @@ func removeContentToTempFile(name, content string) (string, error) { if str == content { continue } - wf.WriteString(str + "\n") + _, err = wf.WriteString(str + "\n") + if err != nil { + return "", err + } prefix = prefix[:0] } return wf.Name(), nil diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go index 185d1e8bd2..725c4debca 100644 --- a/vendor/github.com/posener/complete/complete.go +++ b/vendor/github.com/posener/complete/complete.go @@ -10,14 +10,16 @@ import ( "fmt" "io" "os" + 
"strconv" "github.com/posener/complete/cmd" "github.com/posener/complete/match" ) const ( - envComplete = "COMP_LINE" - envDebug = "COMP_DEBUG" + envLine = "COMP_LINE" + envPoint = "COMP_POINT" + envDebug = "COMP_DEBUG" ) // Complete structs define completion for a command with CLI options @@ -55,13 +57,18 @@ func (c *Complete) Run() bool { // For installation: it assumes that flags were added and parsed before // it was called. func (c *Complete) Complete() bool { - line, ok := getLine() + line, point, ok := getEnv() if !ok { // make sure flags parsed, // in case they were not added in the main program return c.CLI.Run() } - Log("Completing line: %s", line) + + if point >= 0 && point < len(line) { + line = line[:point] + } + + Log("Completing phrase: %s", line) a := newArgs(line) Log("Completing last field: %s", a.Last) options := c.Command.Predict(a) @@ -79,12 +86,19 @@ func (c *Complete) Complete() bool { return true } -func getLine() (string, bool) { - line := os.Getenv(envComplete) +func getEnv() (line string, point int, ok bool) { + line = os.Getenv(envLine) if line == "" { - return "", false + return + } + point, err := strconv.Atoi(os.Getenv(envPoint)) + if err != nil { + // If failed parsing point for some reason, set it to point + // on the end of the line. + Log("Failed parsing point %s: %v", os.Getenv(envPoint), err) + point = len(line) } - return line, true + return line, point, true } func (c *Complete) output(options []string) { diff --git a/vendor/github.com/posener/complete/go.mod b/vendor/github.com/posener/complete/go.mod new file mode 100644 index 0000000000..fef0c440d0 --- /dev/null +++ b/vendor/github.com/posener/complete/go.mod @@ -0,0 +1,3 @@ +module github.com/posener/complete + +require github.com/hashicorp/go-multierror v1.0.0 diff --git a/vendor/github.com/posener/complete/go.sum b/vendor/github.com/posener/complete/go.sum new file mode 100644 index 0000000000..d2f13301d8 --- /dev/null +++ b/vendor/github.com/posener/complete/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go index 797a80ced3..c3029556e5 100644 --- a/vendor/github.com/posener/complete/log.go +++ b/vendor/github.com/posener/complete/log.go @@ -1,7 +1,6 @@ package complete import ( - "io" "io/ioutil" "log" "os" @@ -15,7 +14,7 @@ import ( var Log = getLogger() func getLogger() func(format string, args ...interface{}) { - var logfile io.Writer = ioutil.Discard + var logfile = ioutil.Discard if os.Getenv(envDebug) != "" { logfile = os.Stderr } diff --git a/vendor/github.com/posener/complete/metalinter.json b/vendor/github.com/posener/complete/metalinter.json deleted file mode 100644 index 799c1d03f2..0000000000 --- a/vendor/github.com/posener/complete/metalinter.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Vendor": true, - "DisableAll": true, - "Enable": [ - "gofmt", - "goimports", - "interfacer", - "goconst", - "misspell", - "unconvert", - "gosimple", - "golint", - "structcheck", - "deadcode", - "vet" - ], - "Exclude": [ - "initTests is unused" - ], - "Deadline": "2m" -} diff --git a/vendor/github.com/posener/complete/readme.md b/vendor/github.com/posener/complete/readme.md 
index 2a999ba762..6d757ef82e 100644 --- a/vendor/github.com/posener/complete/readme.md +++ b/vendor/github.com/posener/complete/readme.md @@ -4,6 +4,7 @@ A tool for bash writing bash completion in go, and bash completion for the go co [![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete) [![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete) +[![golangci](https://golangci.com/badges/github.com/posener/complete.svg)](https://golangci.com/r/github.com/posener/complete) [![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete) [![Go Report Card](https://goreportcard.com/badge/github.com/posener/complete)](https://goreportcard.com/report/github.com/posener/complete) diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml new file mode 100644 index 0000000000..0637db726d --- /dev/null +++ b/vendor/github.com/spf13/afero/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go + +go: + - 1.9 + - "1.10" + - tip + +os: + - linux + - osx + +matrix: + allow_failures: + - go: tip + fast_finish: true + +script: + - go build + - go test -race -v ./... + diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 0000000000..298f0e2665 --- /dev/null +++ b/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
new file mode 100644
index 0000000000..0c9b04b53f
--- /dev/null
+++ b/vendor/github.com/spf13/afero/README.md
@@ -0,0 +1,452 @@
+![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
+
+A FileSystem Abstraction System for Go
+
+[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+# Overview
+
+Afero is a filesystem framework providing a simple, uniform and universal API
+for interacting with any filesystem, as an abstraction layer providing
+interfaces, types and methods. Afero has an exceptionally clean interface and
+simple design without needless constructors or initialization methods.
+
+Afero is also a library providing a base set of interoperable backend
+filesystems that make it easy to work with afero while retaining all the power
+and benefit of the os and ioutil packages.
+
+Afero provides significant improvements over using the os package alone, most
+notably the ability to create mock and testing filesystems without relying on
+the disk.
+
+It is suitable for use in any situation where you would consider using the OS
+package, as it provides an additional abstraction that makes it easy to use a
+memory backed file system during testing. It also adds support for the http
+filesystem for full interoperability.
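+
+As a minimal sketch of that abstraction (illustrative only, and assuming
+nothing beyond the Fs interface and the utility functions listed below),
+the same code can run against the real disk or an in-memory filesystem:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/spf13/afero"
+)
+
+// save only sees the afero.Fs interface, so it is backend-agnostic.
+func save(fs afero.Fs, name string, data []byte) error {
+	return afero.WriteFile(fs, name, data, 0644)
+}
+
+func main() {
+	fs := afero.NewMemMapFs() // swap in afero.NewOsFs() to hit the real disk
+	if err := save(fs, "/greeting.txt", []byte("hello")); err != nil {
+		fmt.Println(err)
+		return
+	}
+	b, _ := afero.ReadFile(fs, "/greeting.txt")
+	fmt.Println(string(b)) // prints "hello"
+}
+```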
+
+
+## Afero Features
+
+* A single consistent API for accessing a variety of filesystems
+* Interoperation between a variety of file system types
+* A set of interfaces to encourage and enforce interoperability between backends
+* An atomic cross platform memory backed file system
+* Support for compositional (union) file systems by combining multiple file systems acting as one
+* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
+* A set of utility functions ported from io, ioutil & hugo to be afero aware
+
+
+# Using Afero
+
+Afero is easy to use and easier to adopt.
+
+A few different ways you could use Afero:
+
+* Use the interfaces alone to define your own file system.
+* Wrap the OS packages.
+* Define different filesystems for different parts of your application.
+* Use Afero for mock filesystems while testing.
+
+## Step 1: Install Afero
+
+First use go get to install the latest version of the library.
+
+    $ go get github.com/spf13/afero
+
+Next include Afero in your application.
+```go
+import "github.com/spf13/afero"
+```
+
+## Step 2: Declare a backend
+
+First define a package variable and set it to a filesystem.
+```go
+var AppFs = afero.NewMemMapFs()
+
+// or
+
+var AppFs = afero.NewOsFs()
+```
+It is important to note that if you repeat the composite literal you
+will be using a completely new and isolated filesystem. In the case of
+OsFs it will still use the same underlying filesystem, but it will reduce
+the ability to drop in other filesystems as desired.
+
+## Step 3: Use it like you would the OS package
+
+Throughout your application use any function and method like you normally
+would.
+
+So if your application previously had:
+```go
+os.Open("/tmp/foo")
+```
+You would replace it with:
+```go
+AppFs.Open("/tmp/foo")
+```
+
+`AppFs` is the variable we defined above.
+
+
+## List of all available functions
+
+File System Methods Available:
+```go
+Chmod(name string, mode os.FileMode) : error
+Chtimes(name string, atime time.Time, mtime time.Time) : error
+Create(name string) : File, error
+Mkdir(name string, perm os.FileMode) : error
+MkdirAll(path string, perm os.FileMode) : error
+Name() : string
+Open(name string) : File, error
+OpenFile(name string, flag int, perm os.FileMode) : File, error
+Remove(name string) : error
+RemoveAll(path string) : error
+Rename(oldname, newname string) : error
+Stat(name string) : os.FileInfo, error
+```
+File Interfaces and Methods Available:
+```go
+io.Closer
+io.Reader
+io.ReaderAt
+io.Seeker
+io.Writer
+io.WriterAt
+
+Name() : string
+Readdir(count int) : []os.FileInfo, error
+Readdirnames(n int) : []string, error
+Stat() : os.FileInfo, error
+Sync() : error
+Truncate(size int64) : error
+WriteString(s string) : ret int, err error
+```
+In some applications it may make sense to define a new package that
+simply exports the file system variable for easy access from anywhere.
+
+## Using Afero's utility functions
+
+Afero provides a set of functions to make it easier to use the underlying file systems.
+These functions have been primarily ported from io & ioutil, with some developed for Hugo.
+
+The afero utilities support all afero compatible backends.
+
+The list of utilities includes:
+
+```go
+DirExists(path string) (bool, error)
+Exists(path string) (bool, error)
+FileContainsBytes(filename string, subslice []byte) (bool, error)
+GetTempDir(subPath string) string
+IsDir(path string) (bool, error)
+IsEmpty(path string) (bool, error)
+ReadDir(dirname string) ([]os.FileInfo, error)
+ReadFile(filename string) ([]byte, error)
+SafeWriteReader(path string, r io.Reader) (err error)
+TempDir(dir, prefix string) (name string, err error)
+TempFile(dir, prefix string) (f File, err error)
+Walk(root string, walkFn filepath.WalkFunc) error
+WriteFile(filename string, data []byte, perm os.FileMode) error
+WriteReader(path string, r io.Reader) (err error)
+```
+For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero).
+
+They can be used in two different ways. You can either call
+them directly, where the first parameter of each function will be the file
+system, or you can declare a new `Afero`, a custom type used to bind these
+functions as methods to a given filesystem.
+
+### Calling utilities directly
+
+```go
+fs := new(afero.MemMapFs)
+f, err := afero.TempFile(fs, "", "ioutil-test")
+```
+
+### Calling via Afero
+
+```go
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
+f, err := afs.TempFile("", "ioutil-test")
+```
+
+## Using Afero for Testing
+
+There is a large benefit to using a mock filesystem for testing. It has a
+completely blank state every time it is initialized and is easily
+reproducible regardless of OS. You could create files to your heart’s content,
+and the file access would be fast while also saving you from all the annoying
+issues with deleting temporary files, Windows file locking, etc. The MemMapFs
+backend is perfect for testing.
+
+* Much faster than performing I/O operations on disk
+* Avoid security issues and permissions
+* Far more control. 'rm -rf /' with confidence
+* Test setup is far easier to do
+* No test cleanup needed
+
+One way to accomplish this is to define a variable as mentioned above.
+In your application this will be set to afero.NewOsFs(); during testing you
+can set it to afero.NewMemMapFs().
+
+It wouldn't be uncommon to have each test initialize a blank slate memory
+backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
+appropriate in my application code. This approach ensures that tests are order
+independent, with no test relying on the state left by an earlier test.
+
+Then in my tests I would initialize a new MemMapFs for each test:
+```go
+func TestExist(t *testing.T) {
+	appFS := afero.NewMemMapFs()
+	// create test files and directories
+	appFS.MkdirAll("src/a", 0755)
+	afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
+	afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
+	name := "src/c"
+	_, err := appFS.Stat(name)
+	if os.IsNotExist(err) {
+		t.Errorf("file \"%s\" does not exist.\n", name)
+	}
+}
+```
+
+# Available Backends
+
+## Operating System Native
+
+### OsFs
+
+The first is simply a wrapper around the native OS calls. This makes it
+very easy to use, as all of the calls are the same as the existing OS
+calls. It also makes it trivial to have your code use the OS during
+operation and a mock filesystem during testing or as needed.
+
+```go
+appfs := afero.NewOsFs()
+appfs.MkdirAll("src/a", 0755)
+```
+
+## Memory Backed Storage
+
+### MemMapFs
+
+Afero also provides a fully atomic memory backed filesystem perfect for use in
+mocking and for avoiding unnecessary disk io when persistence isn’t
+necessary. It is fully concurrent and will work safely within goroutines.
+
+```go
+mm := afero.NewMemMapFs()
+mm.MkdirAll("src/a", 0755)
+```
+
+#### InMemoryFile
+
+As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
+backed file implementation. This can be used in other memory backed file
+systems with ease. Plans are to add a radix tree memory stored file
+system using InMemoryFile.
+
+## Network Interfaces
+
+### SftpFs
+
+Afero has experimental support for the secure file transfer protocol (sftp),
+which can be used to perform file operations over an encrypted channel.
+
+## Filtering Backends
+
+### BasePathFs
+
+The BasePathFs restricts all operations to a given path within an Fs.
+The given file name to the operations on this Fs will be prepended with
+the base path before calling the source Fs.
+
+```go
+bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
+```
+
+### ReadOnlyFs
+
+A thin wrapper around the source Fs providing a read only view.
+
+```go
+fs := afero.NewReadOnlyFs(afero.NewOsFs())
+_, err := fs.Create("/file.txt")
+// err = syscall.EPERM
+```
+
+### RegexpFs
+
+A filtered view on file names; any file NOT matching
+the passed regexp will be treated as non-existing.
+Files not matching the regexp provided will not be created.
+Directories are not filtered.
+
+```go
+fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+_, err := fs.Create("/file.html")
+// err = syscall.ENOENT
+```
+
+### HttpFs
+
+Afero provides an http compatible backend which can wrap any of the existing
+backends.
+
+The http package requires a slightly different version of Open, one which
+returns an http.File type.
+
+Afero provides an httpFs file system which satisfies this requirement.
+Any Afero FileSystem can be used as an httpFs.
+
+```go
+httpFs := afero.NewHttpFs(afero.NewOsFs())
+fileserver := http.FileServer(httpFs.Dir("/static"))
+http.Handle("/", fileserver)
+```
+
+## Composite Backends
+
+Afero provides the ability to have two filesystems (or more) act as a single
+file system.
+
+### CacheOnReadFs
+
+The CacheOnReadFs will lazily make copies of any accessed files from the base
+layer into the overlay. Subsequent reads will be pulled from the overlay
+directly, provided the request is within the cache duration of when it was
+created in the overlay.
+
+If the base filesystem is writeable, any changes to files will be
+done first to the base, then to the overlay layer. Write calls to open file
+handles like `Write()` or `Truncate()` go to the overlay first.
+
+To write files to the overlay only, you can use the overlay Fs directly (not
+via the union Fs).
+
+Files are cached in the layer for the given time.Duration; a cache duration of
+0 means "forever", meaning the file will never be re-requested from the base.
+
+A read-only base will make the overlay also read-only, but it will still copy
+files from the base to the overlay when they're not present (or outdated) in
+the caching layer.
+
+```go
+base := afero.NewOsFs()
+layer := afero.NewMemMapFs()
+ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
+```
+
+### CopyOnWriteFs
+
+The CopyOnWriteFs is a read only base file system with a potentially
+writeable layer on top.
+
+Read operations will first look in the overlay, and if not found there, will
+serve the file from the base.
+
+Changes to the file system will only be made in the overlay.
+
+Any attempt to modify a file found only in the base will copy the file to the
+overlay layer before modification (including opening a file with a writable
+handle).
+
+Removing and renaming files present only in the base layer is not currently
+permitted. If a file is present in the base layer and the overlay, only the
+overlay will be removed/renamed.
+
+```go
+	base := afero.NewOsFs()
+	roBase := afero.NewReadOnlyFs(base)
+	ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+
+	fh, _ := ufs.Create("/home/test/file2.txt")
+	fh.WriteString("This is a test")
+	fh.Close()
+```
+
+In this example all write operations will only occur in memory (MemMapFs),
+leaving the base filesystem (OsFs) untouched.
+
+
+## Desired/possible backends
+
+The following is a short list of possible backends we hope someone will
+implement:
+
+* SSH
+* ZIP
+* TAR
+* S3
+
+# About the project
+
+## What's in the name
+
+Afero comes from the Latin roots Ad-Facere.
+
+**"Ad"** is a prefix meaning "to".
+
+**"Facere"** is a form of the root "faciō", meaning "to make or do".
+
+The literal meaning of afero is "to make" or "to do", which seems very fitting
+for a library that allows one to make files and directories and do things with them.
+
+The English word that shares the same roots as Afero is "affair". Affair shares
+the same concept but as a noun it means "something that is made or done" or "an
+object of a particular type".
+
+It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
+Googles very well.
+
+## Release Notes
+
+* **0.10.0** 2015.12.10
+  * Full compatibility with Windows
+  * Introduction of afero utilities
+  * Test suite rewritten to work cross platform
+  * Normalize paths for MemMapFs
+  * Adding Sync to the file interface
+  * **Breaking Change** Walk and ReadDir have changed parameter order
+  * Moving types used by MemMapFs to a subpackage
+  * General bugfixes and improvements
+* **0.9.0** 2015.11.05
+  * New Walk function similar to filepath.Walk
+  * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
+  * MemMapFs.Remove now really deletes the file
+  * InMemoryFile.Readdir and Readdirnames work correctly
+  * InMemoryFile functions lock it for concurrent access
+  * Test suite improvements
+* **0.8.0** 2014.10.28
+  * First public version
+  * Interfaces feel ready for people to build using
+  * Interfaces satisfy all known uses
+  * MemMapFs passes the majority of the OS test suite
+  * OsFs passes the majority of the OS test suite
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13)
+* [jaqx0r](https://github.com/jaqx0r)
+* [mbertschler](https://github.com/mbertschler)
+* [xor-gate](https://github.com/xor-gate)
+
+## License
+
+Afero is released under the Apache 2.0 license. See
+[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
new file mode 100644
index 0000000000..f5b5e127cd
--- /dev/null
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -0,0 +1,108 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package afero provides types and methods for interacting with the filesystem, +// as an abstraction layer. + +// Afero also provides a few implementations that are mostly interoperable. One that +// uses the operating system filesystem, one that uses memory to store files +// (cross platform) and an interface that should be implemented if you want to +// provide your own filesystem. + +package afero + +import ( + "errors" + "io" + "os" + "time" +) + +type Afero struct { + Fs +} + +// File represents a file in the filesystem. +type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +// Fs is the filesystem interface. +// +// Any simulated or real filesystem should implement this interface. +type Fs interface { + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. + Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. + Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flag int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // The name of this FileSystem + Name() string + + //Chmod changes the mode of the named file to mode. 
+ Chmod(name string, mode os.FileMode) error + + //Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml new file mode 100644 index 0000000000..a633ad500c --- /dev/null +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -0,0 +1,15 @@ +version: '{build}' +clone_folder: C:\gopath\src\github.com\spf13\afero +environment: + GOPATH: C:\gopath +build_script: +- cmd: >- + go version + + go env + + go get -v github.com/spf13/afero/... + + go build github.com/spf13/afero +test_script: +- cmd: go test -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go new file mode 100644 index 0000000000..616ff8ff74 --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath.go @@ -0,0 +1,180 @@ +package afero + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +var _ Lstater = (*BasePathFs)(nil) + +// The BasePathFs restricts all operations to a given path within an Fs. +// The given file name to the operations on this Fs will be prepended with +// the base path before calling the base Fs. +// Any file name (after filepath.Clean()) outside this base path will be +// treated as non existing file. +// +// Note that it does not clean the error messages on return, so you may +// reveal the real path on errors. +type BasePathFs struct { + source Fs + path string +} + +type BasePathFile struct { + File + path string +} + +func (f *BasePathFile) Name() string { + sourcename := f.File.Name() + return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) +} + +func NewBasePathFs(source Fs, path string) Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (path string, err error) { + if err := validateBasePathName(name); err != nil { + return name, err + } + + bpath := filepath.Clean(b.path) + path = filepath.Clean(filepath.Join(bpath, name)) + if !strings.HasPrefix(path, bpath) { + return name, os.ErrNotExist + } + + return path, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. 
+ if filepath.IsAbs(name) { + return os.ErrNotExist + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := b.source.OpenFile(name, flag, mode) + if err != nil { + return nil, err + } + return &BasePathFile{sourcef, b.path}, nil +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := b.source.Open(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := b.source.Create(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + name, err := b.RealPath(name) + if err != nil { + return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} + } + if lstater, ok := b.source.(Lstater); ok { + return lstater.LstatIfPossible(name) + } + fi, err := b.source.Stat(name) + return fi, false, err +} + +// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go 
new file mode 100644 index 0000000000..29a26c67dd --- /dev/null +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -0,0 +1,290 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +// If the cache duration is 0, cache time will be unlimited, i.e. once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. +type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + // not present in the overlay, unknown if it exists in the base: + cacheMiss cacheState = iota + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT || os.IsNotExist(err) { + return cacheMiss, nil, nil + } + + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, 
cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Rename(oldname, newname) + case cacheStale, cacheMiss: + if err := u.copyToLayer(oldname); err != nil { + return err + } + err = u.base.Rename(oldname, newname) + } + if err != nil { + return err + } + return u.layer.Rename(oldname, newname) +} + +func (u *CacheOnReadFs) Remove(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.Remove(name) + } + if err != nil { + return err + } + return u.layer.Remove(name) +} + +func (u *CacheOnReadFs) RemoveAll(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.RemoveAll(name) + } + if err != nil { + return err + } + return u.layer.RemoveAll(name) +} + +func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + st, _, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheLocal, cacheHit: + default: + if err := u.copyToLayer(name); err != nil { + return nil, err + } + } + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + bfi, err := u.base.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + lfi, err := u.layer.OpenFile(name, flag, perm) + if err != nil { + bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? + return nil, err + } + return &UnionFile{Base: bfi, Layer: lfi}, nil + } + return u.layer.OpenFile(name, flag, perm) +} + +func (u *CacheOnReadFs) Open(name string) (File, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + + switch st { + case cacheLocal: + return u.layer.Open(name) + + case cacheMiss: + bfi, err := u.base.Stat(name) + if err != nil { + return nil, err + } + if bfi.IsDir() { + return u.base.Open(name) + } + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + + case cacheStale: + if !fi.IsDir() { + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + } + case cacheHit: + if !fi.IsDir() { + return u.layer.Open(name) + } + } + // the dirs from cacheHit, cacheStale fall down here: + bfile, _ := u.base.Open(name) + lfile, err := u.layer.Open(name) + if err != nil && bfile == nil { + return nil, err + } + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { + err := u.base.Mkdir(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache +} + +func (u *CacheOnReadFs) Name() string { + return "CacheOnReadFs" +} + +func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { + err := u.base.MkdirAll(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CacheOnReadFs) Create(name string) (File, error) { + bfh, err := u.base.Create(name) + if err != nil { + return nil, err + } + lfh, err := u.layer.Create(name) + if err != nil { + // oops, see comment about OS_TRUNC above, should we remove? 
then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{Base: bfh, Layer: lfh}, nil +} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 0000000000..5728243d96 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 0000000000..968fc2783e --- /dev/null +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,25 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !darwin +// +build !openbsd +// +build !freebsd +// +build !dragonfly +// +build !netbsd + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 0000000000..e8108a851e --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,293 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +var _ Lstater = (*CopyOnWriteFs)(nil) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes() and Chmod()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). 
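+// The zero value is not usable; construct instances with NewCopyOnWriteFs.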
+type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + isNotExist := u.isNotExist(err) + if isNotExist { + return u.base.Stat(name) + } + return nil, err + } + return fi, nil +} + +func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + llayer, ok1 := u.layer.(Lstater) + lbase, ok2 := u.base.(Lstater) + + if ok1 { + fi, b, err := llayer.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + + if !u.isNotExist(err) { + return nil, b, err + } + } + + if ok2 { + fi, b, err := lbase.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + if !u.isNotExist(err) { + return nil, b, err + } + } + + fi, err := u.Stat(name) + + return fi, false, err +} + +func (u *CopyOnWriteFs) isNotExist(err error) bool { + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { + return true + } + return false +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. 
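+// Attempts to remove a base-only file return syscall.EPERM.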
+func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. 
Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return ErrFileExists + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + // This is in line with how os.MkdirAll behaves. + return nil + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) +} diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod new file mode 100644 index 0000000000..0868550995 --- /dev/null +++ b/vendor/github.com/spf13/afero/go.mod @@ -0,0 +1,3 @@ +module github.com/spf13/afero + +require golang.org/x/text v0.3.0 diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum new file mode 100644 index 0000000000..6bad37b2a7 --- /dev/null +++ b/vendor/github.com/spf13/afero/go.sum @@ -0,0 +1,2 @@ +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 0000000000..c42193688c --- /dev/null +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." 
+ } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 0000000000..5c3a3d8fff --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,230 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. +func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. 
Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *File. 
+
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func (a Afero) TempFile(dir, prefix string) (f File, err error) {
+	return TempFile(a.Fs, dir, prefix)
+}
+
+func TempFile(fs Fs, dir, prefix string) (f File, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		name := filepath.Join(dir, prefix+nextSuffix())
+		f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+		if os.IsExist(err) {
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				rand = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		break
+	}
+	return
+}
+
+// TempDir creates a new temporary directory in the directory dir
+// with a name beginning with prefix and returns the path of the
+// new directory. If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory. It is the caller's responsibility
+// to remove the directory when no longer needed.
+func (a Afero) TempDir(dir, prefix string) (name string, err error) {
+	return TempDir(a.Fs, dir, prefix)
+}
+func TempDir(fs Fs, dir, prefix string) (name string, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		try := filepath.Join(dir, prefix+nextSuffix())
+		err = fs.Mkdir(try, 0700)
+		if os.IsExist(err) {
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				rand = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		if err == nil {
+			name = try
+		}
+		break
+	}
+	return
+}
diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go
new file mode 100644
index 0000000000..89c1bfc0a7
--- /dev/null
+++ b/vendor/github.com/spf13/afero/lstater.go
@@ -0,0 +1,27 @@
+// Copyright © 2018 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"os"
+)
+
+// Lstater is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
+// Else it will call Stat.
+// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
+type Lstater interface {
+	LstatIfPossible(name string) (os.FileInfo, bool, error)
+}
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 0000000000..c18a87fb71
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+	if !hasMeta(pattern) {
+		// Lstat is not supported by all filesystems.
+		if _, err = lstatIfPossible(fs, pattern); err != nil {
+			return nil, nil
+		}
+		return []string{pattern}, nil
+	}
+
+	dir, file := filepath.Split(pattern)
+	switch dir {
+	case "":
+		dir = "."
+	case string(filepath.Separator):
+		// nothing
+	default:
+		dir = dir[0 : len(dir)-1] // chop off trailing separator
+	}
+
+	if !hasMeta(dir) {
+		return glob(fs, dir, file, nil)
+	}
+
+	var m []string
+	m, err = Glob(fs, dir)
+	if err != nil {
+		return
+	}
+	for _, d := range m {
+		matches, err = glob(fs, d, file, matches)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+	m = matches
+	fi, err := fs.Stat(dir)
+	if err != nil {
+		return
+	}
+	if !fi.IsDir() {
+		return
+	}
+	d, err := fs.Open(dir)
+	if err != nil {
+		return
+	}
+	defer d.Close()
+
+	names, _ := d.Readdirnames(-1)
+	sort.Strings(names)
+
+	for _, n := range names {
+		matched, err := filepath.Match(pattern, n)
+		if err != nil {
+			return m, err
+		}
+		if matched {
+			m = append(m, filepath.Join(dir, n))
+		}
+	}
+	return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+	// TODO(niemeyer): Should other magic characters be added here?
+	return strings.IndexAny(path, "*?[") >= 0
+}
diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 0000000000..e104013f45
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +type Dir interface { + Len() int + Names() []string + Files() []*FileData + Add(*FileData) + Remove(*FileData) +} + +func RemoveFromMemDir(dir *FileData, f *FileData) { + dir.memDir.Remove(f) +} + +func AddToMemDir(dir *FileData, f *FileData) { + dir.memDir.Add(f) +} + +func InitializeDir(d *FileData) { + if d.memDir == nil { + d.dir = true + d.memDir = &DirMap{} + } +} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go new file mode 100644 index 0000000000..03a57ee5b5 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dirmap.go @@ -0,0 +1,43 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 0000000000..7af2fb56ff --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,317 @@ +// Copyright © 2015 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
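A tiny sketch (ours) of the DirMap defined in dirmap.go above: Add and Remove are plain map operations, and Files re-sorts on every call, which is what gives the in-memory Readdir its deterministic ordering. mem/file.go, whose body follows, builds directly on it:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero/mem"
)

func main() {
	dm := mem.DirMap{}
	dm.Add(mem.CreateFile("b.txt"))
	dm.Add(mem.CreateFile("a.txt"))

	// Files() sorts by name on each call via filesSorter.
	for _, fd := range dm.Files() {
		fmt.Println(fd.Name()) // a.txt, then b.txt
	}
}
```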
+ +package mem + +import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" +) + +import "time" + +const FilePathSeparator = string(filepath.Separator) + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time +} + +func (d *FileData) Name() string { + d.Lock() + defer d.Unlock() + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true} +} + +func ChangeFileName(f *FileData, newname string) { + f.Lock() + f.name = newname + f.Unlock() +} + +func SetMode(f *FileData, mode os.FileMode) { + f.Lock() + f.mode = mode + f.Unlock() +} + +func SetModTime(f *FileData, mtime time.Time) { + f.Lock() + setModTime(f, mtime) + f.Unlock() +} + +func setModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + setModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.fileData.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + if !f.fileData.dir { + return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + } + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +func (f *File) Read(b []byte) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed == true { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if int(f.at) > len(f.fileData.data) { + return 0, io.ErrUnexpectedEOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return 
f.Read(b) +} + +func (f *File) Truncate(size int64) error { + if f.closed == true { + return ErrFileClosed + } + if f.readOnly { + return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + if size > int64(len(f.fileData.data)) { + diff := size - int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + setModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + switch whence { + case 0: + atomic.StoreInt64(&f.at, offset) + case 1: + atomic.AddInt64(&f.at, int64(offset)) + case 2: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.readOnly { + return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) + } + setModTime(f.fileData, time.Now()) + + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + s.Lock() + _, name := filepath.Split(s.name) + s.Unlock() + return name +} +func (s *FileInfo) Mode() os.FileMode { + s.Lock() + defer s.Unlock() + return s.mode +} +func (s *FileInfo) ModTime() time.Time { + s.Lock() + defer s.Unlock() + return s.modtime +} +func (s *FileInfo) IsDir() bool { + s.Lock() + defer s.Unlock() + return s.dir +} +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + s.Lock() + defer s.Unlock() + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 0000000000..09498e70fb --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,365 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? + m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + }) + return m.data +} + +func (*MemMapFs) Name() string { return "MemMapFS" } + +func (m *MemMapFs) Create(name string) (File, error) { + name = normalizePath(name) + m.mu.Lock() + file := mem.CreateFile(name) + m.getData()[name] = file + m.registerWithParent(file) + m.mu.Unlock() + return mem.NewFileHandle(file), nil +} + +func (m *MemMapFs) unRegisterWithParent(fileName string) error { + f, err := m.lockfreeOpen(fileName) + if err != nil { + return err + } + parent := m.findParent(f) + if parent == nil { + log.Panic("parent of ", f.Name(), " is nil") + } + + parent.Lock() + mem.RemoveFromMemDir(parent, f) + parent.Unlock() + return nil +} + +func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { + pdir, _ := filepath.Split(f.Name()) + pdir = filepath.Clean(pdir) + pfile, err := m.lockfreeOpen(pdir) + if err != nil { + return nil + } + return pfile +} + +func (m *MemMapFs) registerWithParent(f *mem.FileData) { + if f == nil { + return + } + parent := m.findParent(f) + if parent == nil { + pdir := filepath.Dir(filepath.Clean(f.Name())) + err := m.lockfreeMkdir(pdir, 0777) + if err != nil { + //log.Println("Mkdir error:", err) + return + } + parent, err = m.lockfreeOpen(pdir) + if err != nil { + //log.Println("Open after Mkdir error:", err) + return + } + } + + parent.Lock() + mem.InitializeDir(parent) + mem.AddToMemDir(parent, f) + parent.Unlock() +} + +func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + x, ok := m.getData()[name] + if ok { + // Only return ErrFileExists if it's a file, not a directory. 
+ i := mem.FileInfo{FileData: x} + if !i.IsDir() { + return ErrFileExists + } + } else { + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + } + return nil +} + +func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + _, ok := m.getData()[name] + m.mu.RUnlock() + if ok { + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } + + m.mu.Lock() + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + m.mu.Unlock() + + m.Chmod(name, perm|os.ModeDir) + + return nil +} + +func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { + err := m.Mkdir(path, perm) + if err != nil { + if err.(*os.PathError).Err == ErrFileExists { + return nil + } + return err + } + return nil +} + +// Handle some relative paths +func normalizePath(path string) string { + path = filepath.Clean(path) + + switch path { + case ".": + return FilePathSeparator + case "..": + return FilePathSeparator + default: + return path + } +} + +func (m *MemMapFs) Open(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewReadOnlyFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) openWrite(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) open(name string) (*mem.FileData, error) { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} + } + return f, nil +} + +func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { + name = normalizePath(name) + f, ok := m.getData()[name] + if ok { + return f, nil + } else { + return nil, ErrFileNotFound + } +} + +func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + chmod := false + file, err := m.openWrite(name) + if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { + file, err = m.Create(name) + chmod = true + } + if err != nil { + return nil, err + } + if flag == os.O_RDONLY { + file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) + } + if flag&os.O_APPEND > 0 { + _, err = file.Seek(0, os.SEEK_END) + if err != nil { + file.Close() + return nil, err + } + } + if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { + err = file.Truncate(0) + if err != nil { + file.Close() + return nil, err + } + } + if chmod { + m.Chmod(name, perm) + } + return file, nil +} + +func (m *MemMapFs) Remove(name string) error { + name = normalizePath(name) + + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.getData()[name]; ok { + err := m.unRegisterWithParent(name) + if err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + delete(m.getData(), name) + } else { + return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} + } + return nil +} + +func (m *MemMapFs) RemoveAll(path string) error { + path = normalizePath(path) + m.mu.Lock() + m.unRegisterWithParent(path) + m.mu.Unlock() + + m.mu.RLock() + defer m.mu.RUnlock() + + for p, _ := range m.getData() { + if strings.HasPrefix(p, path) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + m.mu.Unlock() + m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) Rename(oldname, newname string) error { + oldname = normalizePath(oldname) + newname = normalizePath(newname) + + if oldname == newname { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := 
m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + fileData := m.getData()[oldname] + delete(m.getData(), oldname) + mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} + } + return nil +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{FileData: x} + fmt.Println(x.Name(), y.Size()) + } +} + +// func debugMemMapList(fs Fs) { +// if x, ok := fs.(*MemMapFs); ok { +// x.List() +// } +// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go new file mode 100644 index 0000000000..13cc1b84c9 --- /dev/null +++ b/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,101 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +var _ Lstater = (*OsFs)(nil) + +// OsFs is a Fs implementation that uses functions provided by the os package. +// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). 
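Where MemMapFs above keeps everything in a map guarded by a RWMutex, OsFs (declared just below) is a thin passthrough to the os package; the two are interchangeable behind the Fs interface. A minimal sketch, with the report helper and paths invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// report works against any Fs, in-memory or OS-backed.
func report(fs afero.Fs, path string) {
	fi, err := fs.Stat(path)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(fs.Name(), fi.Name(), fi.Size())
}

func main() {
	memFs := afero.NewMemMapFs()
	afero.WriteFile(memFs, "/hello.txt", []byte("hi"), 0644)

	report(memFs, "/hello.txt")           // MemMapFS hello.txt 2
	report(afero.NewOsFs(), "/hello.txt") // most likely a *PathError
}
```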
+type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fi, err := os.Lstat(name) + return fi, true, err +} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go new file mode 100644 index 0000000000..18f60a0f6b --- /dev/null +++ b/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,106 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfPossible(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem supports it, use Lstat, else use fs.Stat +func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { + if lfs, ok := fs.(Lstater); ok { + fi, _, err := lfs.LstatIfPossible(path) + return fi, err + } + return fs.Stat(path) +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
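The Walk entry points are declared immediately below; here is a short sketch of them in use (the tree is invented). Because MemMapFs does not implement Lstater, lstatIfPossible silently falls back to Stat:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/data/a.txt", []byte("a"), 0644)
	afero.WriteFile(fs, "/data/sub/b.txt", []byte("b"), 0644)

	// Entries are visited in lexical order, directories included.
	afero.Walk(fs, "/data", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
}
```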
+ +func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { + return Walk(a.Fs, root, walkFn) +} + +func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { + info, err := lstatIfPossible(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go new file mode 100644 index 0000000000..c6376ec373 --- /dev/null +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -0,0 +1,80 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +var _ Lstater = (*ReadOnlyFs)(nil) + +type ReadOnlyFs struct { + source Fs +} + +func NewReadOnlyFs(source Fs) Fs { + return &ReadOnlyFs{source: source} +} + +func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { + return ReadDir(r.source, name) +} + +func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Name() string { + return "ReadOnlyFilter" +} + +func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { + return r.source.Stat(name) +} + +func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + if lsf, ok := r.source.(Lstater); ok { + return lsf.LstatIfPossible(name) + } + fi, err := r.Stat(name) + return fi, false, err +} + +func (r *ReadOnlyFs) Rename(o, n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) RemoveAll(p string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Remove(n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + return nil, syscall.EPERM + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *ReadOnlyFs) Open(n string) (File, error) { + return r.source.Open(n) +} + +func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Create(n string) (File, error) { + return nil, syscall.EPERM +} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go new file mode 100644 index 0000000000..9d92dbc051 --- /dev/null +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -0,0 +1,214 @@ +package afero + +import ( + "os" + "regexp" + "syscall" + "time" +) + +// The RegexpFs filters files (not directories) by regular expression. Only +// files matching the given regexp will be allowed, all others get a ENOENT error ( +// "No such file or directory"). 
+// +type RegexpFs struct { + re *regexp.Regexp + source Fs +} + +func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { + return &RegexpFs{source: source, re: re} +} + +type RegexpFile struct { + f File + re *regexp.Regexp +} + +func (r *RegexpFs) matchesName(name string) error { + if r.re == nil { + return nil + } + if r.re.MatchString(name) { + return nil + } + return syscall.ENOENT +} + +func (r *RegexpFs) dirOrMatches(name string) error { + dir, err := IsDir(r.source, name) + if err != nil { + return err + } + if dir { + return nil + } + return r.matchesName(name) +} + +func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chtimes(name, a, m) +} + +func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chmod(name, mode) +} + +func (r *RegexpFs) Name() string { + return "RegexpFs" +} + +func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.Stat(name) +} + +func (r *RegexpFs) Rename(oldname, newname string) error { + dir, err := IsDir(r.source, oldname) + if err != nil { + return err + } + if dir { + return nil + } + if err := r.matchesName(oldname); err != nil { + return err + } + if err := r.matchesName(newname); err != nil { + return err + } + return r.source.Rename(oldname, newname) +} + +func (r *RegexpFs) RemoveAll(p string) error { + dir, err := IsDir(r.source, p) + if err != nil { + return err + } + if !dir { + if err := r.matchesName(p); err != nil { + return err + } + } + return r.source.RemoveAll(p) +} + +func (r *RegexpFs) Remove(name string) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Remove(name) +} + +func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *RegexpFs) Open(name string) (File, error) { + dir, err := IsDir(r.source, name) + if err != nil { + return nil, err + } + if !dir { + if err := r.matchesName(name); err != nil { + return nil, err + } + } + f, err := r.source.Open(name) + return &RegexpFile{f: f, re: r.re}, nil +} + +func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { + return r.source.Mkdir(n, p) +} + +func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { + return r.source.MkdirAll(n, p) +} + +func (r *RegexpFs) Create(name string) (File, error) { + if err := r.matchesName(name); err != nil { + return nil, err + } + return r.source.Create(name) +} + +func (f *RegexpFile) Close() error { + return f.f.Close() +} + +func (f *RegexpFile) Read(s []byte) (int, error) { + return f.f.Read(s) +} + +func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { + return f.f.ReadAt(s, o) +} + +func (f *RegexpFile) Seek(o int64, w int) (int64, error) { + return f.f.Seek(o, w) +} + +func (f *RegexpFile) Write(s []byte) (int, error) { + return f.f.Write(s) +} + +func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { + return f.f.WriteAt(s, o) +} + +func (f *RegexpFile) Name() string { + return f.f.Name() +} + +func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { + var rfi []os.FileInfo + rfi, err = f.f.Readdir(c) + if err != nil { + return nil, err + } + for _, i := range rfi { + if i.IsDir() || f.re.MatchString(i.Name()) { + fi = append(fi, i) + } + } + return fi, 
nil +} + +func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { + fi, err := f.Readdir(c) + if err != nil { + return nil, err + } + for _, s := range fi { + n = append(n, s.Name()) + } + return n, nil +} + +func (f *RegexpFile) Stat() (os.FileInfo, error) { + return f.f.Stat() +} + +func (f *RegexpFile) Sync() error { + return f.f.Sync() +} + +func (f *RegexpFile) Truncate(s int64) error { + return f.f.Truncate(s) +} + +func (f *RegexpFile) WriteString(s string) (int, error) { + return f.f.WriteString(s) +} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go new file mode 100644 index 0000000000..abcf12d36d --- /dev/null +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -0,0 +1,316 @@ +package afero + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +// The UnionFile implements the afero.File interface and will be returned +// when reading a directory present at least in the overlay or opening a file +// for writing. +// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + Base File + Layer File + Merger DirsMerger + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.Base != nil { + f.Base.Close() + } + if f.Layer != nil { + return f.Layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.Layer != nil { + n, err := f.Layer.Read(s) + if (err == nil || err == io.EOF) && f.Base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.Base != nil { + return f.Base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.Layer != nil { + n, err := f.Layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + } + return n, err + } + if f.Base != nil { + return f.Base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.Layer != nil { + pos, err = f.Layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o, w) + } + return pos, err + } + if f.Base != nil { + return f.Base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.Write(s) + if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
+ _, err = f.Base.Write(s) + } + return n, err + } + if f.Base != nil { + return f.Base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteAt(s, o) + if err == nil && f.Base != nil { + _, err = f.Base.WriteAt(s, o) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.Layer != nil { + return f.Layer.Name() + } + return f.Base.Name() +} + +// DirsMerger is how UnionFile weaves two directories together. +// It takes the FileInfo slices from the layer and the base and returns a +// single view. +type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) + +var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { + var files = make(map[string]os.FileInfo) + + for _, fi := range lofi { + files[fi.Name()] = fi + } + + for _, fi := range bofi { + if _, exists := files[fi.Name()]; !exists { + files[fi.Name()] = fi + } + } + + rfi := make([]os.FileInfo, len(files)) + + i := 0 + for _, fi := range files { + rfi[i] = fi + i++ + } + + return rfi, nil + +} + +// Readdir will weave the two directories together and +// return a single view of the overlayed directories +// At the end of the directory view, the error is io.EOF. +func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { + var merge DirsMerger = f.Merger + if merge == nil { + merge = defaultUnionMergeDirsFn + } + + if f.off == 0 { + var lfi []os.FileInfo + if f.Layer != nil { + lfi, err = f.Layer.Readdir(-1) + if err != nil { + return nil, err + } + } + + var bfi []os.FileInfo + if f.Base != nil { + bfi, err = f.Base.Readdir(-1) + if err != nil { + return nil, err + } + + } + merged, err := merge(lfi, bfi) + if err != nil { + return nil, err + } + f.files = append(f.files, merged...) 
+ } + + if f.off >= len(f.files) { + return nil, io.EOF + } + + if c == -1 { + return f.files[f.off:], nil + } + + if c > len(f.files) { + c = len(f.files) + } + + defer func() { f.off += c }() + return f.files[f.off:c], nil +} + +func (f *UnionFile) Readdirnames(c int) ([]string, error) { + rfi, err := f.Readdir(c) + if err != nil { + return nil, err + } + var names []string + for _, fi := range rfi { + names = append(names, fi.Name()) + } + return names, nil +} + +func (f *UnionFile) Stat() (os.FileInfo, error) { + if f.Layer != nil { + return f.Layer.Stat() + } + if f.Base != nil { + return f.Base.Stat() + } + return nil, BADFD +} + +func (f *UnionFile) Sync() (err error) { + if f.Layer != nil { + err = f.Layer.Sync() + if err == nil && f.Base != nil { + err = f.Base.Sync() + } + return err + } + if f.Base != nil { + return f.Base.Sync() + } + return BADFD +} + +func (f *UnionFile) Truncate(s int64) (err error) { + if f.Layer != nil { + err = f.Layer.Truncate(s) + if err == nil && f.Base != nil { + err = f.Base.Truncate(s) + } + return err + } + if f.Base != nil { + return f.Base.Truncate(s) + } + return BADFD +} + +func (f *UnionFile) WriteString(s string) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteString(s) + if err == nil && f.Base != nil { + _, err = f.Base.WriteString(s) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteString(s) + } + return 0, BADFD +} + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + // First make sure the directory exists + exists, err := Exists(layer, filepath.Dir(name)) + if err != nil { + return err + } + if !exists { + err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? + if err != nil { + return err + } + } + + // Create the file on the overlay + lfh, err := layer.Create(name) + if err != nil { + return err + } + n, err := io.Copy(lfh, bfh) + if err != nil { + // If anything fails, clean up the file + layer.Remove(name) + lfh.Close() + return err + } + + bfi, err := bfh.Stat() + if err != nil || bfi.Size() != n { + layer.Remove(name) + lfh.Close() + return syscall.EIO + } + + err = lfh.Close() + if err != nil { + layer.Remove(name) + lfh.Close() + return err + } + return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) +} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go new file mode 100644 index 0000000000..4f253f481e --- /dev/null +++ b/vendor/github.com/spf13/afero/util.go @@ -0,0 +1,330 @@ +// Copyright ©2015 Steve Francia +// Portions Copyright ©2015 The Hugo Authors +// Portions Copyright 2016-present Bjørn Erik Pedersen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "unicode" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// Filepath separator defined by os.Separator. 
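Stepping back to unionFile.go for a moment: the Merger field is UnionFile's one extension point, and Readdir falls back to defaultUnionMergeDirsFn when it is nil. A hypothetical replacement merger, hideDotfiles (our name, not the library's), that keeps the layer-wins behaviour but filters dotfiles from the combined listing:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/afero"
)

var hideDotfiles afero.DirsMerger = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
	seen := map[string]bool{}
	var out []os.FileInfo
	for _, fi := range append(lofi, bofi...) { // layer entries first, so they win
		if seen[fi.Name()] || strings.HasPrefix(fi.Name(), ".") {
			continue
		}
		seen[fi.Name()] = true
		out = append(out, fi)
	}
	return out, nil
}

func main() {
	base, layer := afero.NewMemMapFs(), afero.NewMemMapFs()
	afero.WriteFile(base, "/d/.hidden", []byte("x"), 0644)
	afero.WriteFile(layer, "/d/shown", []byte("y"), 0644)

	// Opening a directory present in both layers yields a *UnionFile.
	f, err := afero.NewCopyOnWriteFs(base, layer).Open("/d")
	if err != nil {
		panic(err)
	}
	if uf, ok := f.(*afero.UnionFile); ok {
		uf.Merger = hideDotfiles
	}
	names, _ := f.Readdirnames(-1)
	fmt.Println(names) // [shown]
}
```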
+const FilePathSeparator = string(filepath.Separator) + +// Takes a reader and a path and writes the content +func (a Afero) WriteReader(path string, r io.Reader) (err error) { + return WriteReader(a.Fs, path, r) +} + +func WriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + if err != os.ErrExist { + return err + } + } + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +// Same as WriteReader but checks to see if file/directory already exists. +func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { + return SafeWriteReader(a.Fs, path, r) +} + +func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + return + } + } + + exists, err := Exists(fs, path) + if err != nil { + return + } + if exists { + return fmt.Errorf("%v already exists", path) + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +func (a Afero) GetTempDir(subPath string) string { + return GetTempDir(a.Fs, subPath) +} + +// GetTempDir returns the default temp directory with trailing slash +// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx +func GetTempDir(fs Fs, subPath string) string { + addSlash := func(p string) string { + if FilePathSeparator != p[len(p)-1:] { + p = p + FilePathSeparator + } + return p + } + dir := addSlash(os.TempDir()) + + if subPath != "" { + // preserve windows backslash :-( + if FilePathSeparator == "\\" { + subPath = strings.Replace(subPath, "\\", "____", -1) + } + dir = dir + UnicodeSanitize((subPath)) + if FilePathSeparator == "\\" { + dir = strings.Replace(dir, "____", "\\", -1) + } + + if exists, _ := Exists(fs, dir); exists { + return addSlash(dir) + } + + err := fs.MkdirAll(dir, 0777) + if err != nil { + panic(err) + } + dir = addSlash(dir) + } + return dir +} + +// Rewrite string to remove non-standard path characters +func UnicodeSanitize(s string) string { + source := []rune(s) + target := make([]rune, 0, len(source)) + + for _, r := range source { + if unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsMark(r) || + r == '.' || + r == '/' || + r == '\\' || + r == '_' || + r == '-' || + r == '%' || + r == ' ' || + r == '#' { + target = append(target, r) + } + } + + return string(target) +} + +// Transform characters with accents into plain forms. +func NeuterAccents(s string) string { + t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) + result, _, _ := transform.String(t, string(s)) + + return result +} + +func isMn(r rune) bool { + return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks +} + +func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { + return FileContainsBytes(a.Fs, filename, subslice) +} + +// Check if a file contains a specified byte slice. 
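A sketch of the two writer helpers above, with an invented path: WriteReader creates any missing directories and streams the reader in, while SafeWriteReader additionally refuses to overwrite an existing file:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	if err := afero.WriteReader(fs, "/etc/app/conf", strings.NewReader("v1")); err != nil {
		panic(err)
	}

	// A second write through SafeWriteReader fails: the file exists.
	err := afero.SafeWriteReader(fs, "/etc/app/conf", strings.NewReader("v2"))
	fmt.Println(err) // /etc/app/conf already exists
}
```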
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+	return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// Check if a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
+	if r == nil || len(subslices) == 0 {
+		return false
+	}
+
+	largestSlice := 0
+
+	for _, sl := range subslices {
+		if len(sl) > largestSlice {
+			largestSlice = len(sl)
+		}
+	}
+
+	if largestSlice == 0 {
+		return false
+	}
+
+	bufflen := largestSlice * 4
+	halflen := bufflen / 2
+	buff := make([]byte, bufflen)
+	var err error
+	var n, i int
+
+	for {
+		i++
+		if i == 1 {
+			n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
+		} else {
+			if i != 2 {
+				// shift left to catch overlapping matches
+				copy(buff[:], buff[halflen:])
+			}
+			n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
+		}
+
+		if n > 0 {
+			for _, sl := range subslices {
+				if bytes.Contains(buff, sl) {
+					return true
+				}
+			}
+		}
+
+		if err != nil {
+			break
+		}
+	}
+	return false
+}
+
+func (a Afero) DirExists(path string) (bool, error) {
+	return DirExists(a.Fs, path)
+}
+
+// DirExists checks if a path exists and is a directory.
+func DirExists(fs Fs, path string) (bool, error) {
+	fi, err := fs.Stat(path)
+	if err == nil && fi.IsDir() {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func (a Afero) IsDir(path string) (bool, error) {
+	return IsDir(a.Fs, path)
+}
+
+// IsDir checks if a given path is a directory.
+func IsDir(fs Fs, path string) (bool, error) {
+	fi, err := fs.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	return fi.IsDir(), nil
+}
+
+func (a Afero) IsEmpty(path string) (bool, error) {
+	return IsEmpty(a.Fs, path)
+}
+
+// IsEmpty checks if a given file or directory is empty.
+func IsEmpty(fs Fs, path string) (bool, error) {
+	if b, _ := Exists(fs, path); !b {
+		return false, fmt.Errorf("%q path does not exist", path)
+	}
+	fi, err := fs.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	if fi.IsDir() {
+		f, err := fs.Open(path)
+		if err != nil {
+			return false, err
+		}
+		defer f.Close()
+		list, err := f.Readdir(-1)
+		return len(list) == 0, err
+	}
+	return fi.Size() == 0, nil
+}
+
+func (a Afero) Exists(path string) (bool, error) {
+	return Exists(a.Fs, path)
+}
+
+// Check if a file or directory exists.
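+//
+// A minimal usage sketch:
+//
+//	fs := NewMemMapFs()
+//	ok, err := Exists(fs, "/missing") // ok == false, err == nil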
+func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md index 969ae7a00b..0a2dc8284f 100644 --- a/vendor/github.com/ulikunitz/xz/README.md +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -12,46 +12,48 @@ have been developed over a long time and are highly optimized. However there are a number of improvements planned and I'm very optimistic about parallel compression and decompression. Stay tuned! -# Using the API +## Using the API The following example program shows how to use the API. - package main - - import ( - "bytes" - "io" - "log" - "os" - - "github.com/ulikunitz/xz" - ) - - func main() { - const text = "The quick brown fox jumps over the lazy dog.\n" - var buf bytes.Buffer - // compress text - w, err := xz.NewWriter(&buf) - if err != nil { - log.Fatalf("xz.NewWriter error %s", err) - } - if _, err := io.WriteString(w, text); err != nil { - log.Fatalf("WriteString error %s", err) - } - if err := w.Close(); err != nil { - log.Fatalf("w.Close error %s", err) - } - // decompress buffer and write output to stdout - r, err := xz.NewReader(&buf) - if err != nil { - log.Fatalf("NewReader error %s", err) - } - if _, err = io.Copy(os.Stdout, r); err != nil { - log.Fatalf("io.Copy error %s", err) - } +```go +package main + +import ( + "bytes" + "io" + "log" + "os" + + "github.com/ulikunitz/xz" +) + +func main() { + const text = "The quick brown fox jumps over the lazy dog.\n" + var buf bytes.Buffer + // compress text + w, err := xz.NewWriter(&buf) + if err != nil { + log.Fatalf("xz.NewWriter error %s", err) } + if _, err := io.WriteString(w, text); err != nil { + log.Fatalf("WriteString error %s", err) + } + if err := w.Close(); err != nil { + log.Fatalf("w.Close error %s", err) + } + // decompress buffer and write output to stdout + r, err := xz.NewReader(&buf) + if err != nil { + log.Fatalf("NewReader error %s", err) + } + if _, err = io.Copy(os.Stdout, r); err != nil { + log.Fatalf("io.Copy error %s", err) + } +} +``` -# Using the gxz compression tool +## Using the gxz compression tool The package includes a gxz command line utility for compression and decompression. diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md index 7b34c0caa7..c10e51b9aa 100644 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -86,6 +86,10 @@ ## Log +### 2018-10-28 + +Release v0.5.5 fixes issues #19 observing ErrLimit outputs. + ### 2017-06-05 Release v0.5.4 fixes issues #15 of another problem with the padding size @@ -102,7 +106,7 @@ Release v0.5.2 became necessary to allow the decoding of xz files with 4-byte padding in the block header. Many thanks to Greg, who reported the issue. -### 2016-07-23 +### 2016-07-23 Release v0.5.1 became necessary to fix problems with 32-bit platforms. Many thanks to Bruno Brigas, who reported the issue. @@ -194,7 +198,7 @@ and lzma.Writer and fixed the error handling. By computing the bit length of the LZMA operations I was able to improve the greedy algorithm implementation. 
By using an 8 MByte buffer the compression rate was not as good as for
xz but already better then
-gzip default.
+gzip default.

Compression is currently slow, but this is something we will be able
to improve over time.
@@ -213,7 +217,7 @@
The package lzb contains now the basic implementation for creating or
reading LZMA byte streams. It allows the support for the implementation
of the DAG-shortest-path algorithm for the compression function.

-### 2015-04-23
+### 2015-04-23

Completed yesterday the lzbase classes. I'm a little bit concerned that
using the components may require too much code, but on the other hand
@@ -242,7 +246,7 @@
right lzbase.Reader and lzbase.Writer.

As a start I have implemented ReaderState and WriterState to ensure
that the state for reading is only used by readers and WriterState only
-used by Writers.
+used by Writers.

### 2015-04-20

@@ -274,7 +278,7 @@
almost all files from lzma.

### 2015-03-31

-Removed only a TODO item.
+Removed only a TODO item.

However in Francesco Campoy's presentation "Go for Javaneros
(Javaïstes?)" is the the idea that using an embedded field E, all the
diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go
index 18ce009928..fe1900a66e 100644
--- a/vendor/github.com/ulikunitz/xz/lzma/encoder.go
+++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go
@@ -11,7 +11,7 @@ import (

 // opLenMargin provides the upper limit of the number of bytes required
 // to encode a single operation.
-const opLenMargin = 10
+const opLenMargin = 16

 // compressFlags control the compression process.
 type compressFlags uint32
diff --git a/vendor/github.com/vmihailenco/msgpack/.travis.yml b/vendor/github.com/vmihailenco/msgpack/.travis.yml
new file mode 100644
index 0000000000..0c2f74ed46
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/.travis.yml
@@ -0,0 +1,17 @@
+sudo: false
+language: go
+
+go:
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+
+install:
+  - go get gopkg.in/check.v1
diff --git a/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md b/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md
new file mode 100644
index 0000000000..9a4f38a93c
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md
@@ -0,0 +1,24 @@
+## 3.4
+
+- Encode, Decode, Marshal, and Unmarshal are changed to accept a single argument. EncodeMulti and DecodeMulti are added as replacements.
+- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64.
+- Encoder changed to preserve the type of numbers instead of choosing the most compact encoding. The old behavior can be achieved with Encoder.UseCompactEncoding.
+
+## v3.3
+
+- `msgpack:",inline"` tag is restored to force inlining structs.
+
+## v3.2
+
+- Decoding extension types returns pointer to the value instead of the value. Fixes #153
+
+## v3
+
+- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack.
+- Msgpack maps are decoded into map[string]interface{} by default.
+- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of DecodeArrayLen.
+- Embedded structs are automatically inlined where possible.
+- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old format is supported as well.
+- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint. There should be no performance differences.
+- DecodeInterface can now return int8/16/32 and uint8/16/32.
+- PeekCode returns codes.Code instead of byte. diff --git a/vendor/github.com/vmihailenco/msgpack/LICENSE b/vendor/github.com/vmihailenco/msgpack/LICENSE new file mode 100644 index 0000000000..b749d07079 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/msgpack/Makefile b/vendor/github.com/vmihailenco/msgpack/Makefile new file mode 100644 index 0000000000..b62ae6a46e --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/Makefile @@ -0,0 +1,5 @@ +all: + go test ./... + env GOOS=linux GOARCH=386 go test ./... + go test ./... -short -race + go vet diff --git a/vendor/github.com/vmihailenco/msgpack/README.md b/vendor/github.com/vmihailenco/msgpack/README.md new file mode 100644 index 0000000000..0c75ae16e2 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/README.md @@ -0,0 +1,69 @@ +# MessagePack encoding for Golang + +[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg?branch=v2)](https://travis-ci.org/vmihailenco/msgpack) +[![GoDoc](https://godoc.org/github.com/vmihailenco/msgpack?status.svg)](https://godoc.org/github.com/vmihailenco/msgpack) + +Supports: +- Primitives, arrays, maps, structs, time.Time and interface{}. +- Appengine *datastore.Key and datastore.Cursor. +- [CustomEncoder](https://godoc.org/github.com/vmihailenco/msgpack#example-CustomEncoder)/CustomDecoder interfaces for custom encoding. +- [Extensions](https://godoc.org/github.com/vmihailenco/msgpack#example-RegisterExt) to encode type information. +- Renaming fields via `msgpack:"my_field_name"`. +- Omitting individual empty fields via `msgpack:",omitempty"` tag or all [empty fields in a struct](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--OmitEmpty). +- [Map keys sorting](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.SortMapKeys). +- Encoding/decoding all [structs as arrays](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.StructAsArray) or [individual structs](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--AsArray). 
+- [Encoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseJSONTag) with [Decoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Decoder.UseJSONTag) can turn msgpack into drop-in replacement for JSON. +- Simple but very fast and efficient [queries](https://godoc.org/github.com/vmihailenco/msgpack#example-Decoder-Query). + +API docs: https://godoc.org/github.com/vmihailenco/msgpack. +Examples: https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples. + +## Installation + +Install: + +```shell +go get -u github.com/vmihailenco/msgpack +``` + +## Quickstart + +```go +func ExampleMarshal() { + type Item struct { + Foo string + } + + b, err := msgpack.Marshal(&Item{Foo: "bar"}) + if err != nil { + panic(err) + } + + var item Item + err = msgpack.Unmarshal(b, &item) + if err != nil { + panic(err) + } + fmt.Println(item.Foo) + // Output: bar +} +``` + +## Benchmark + +``` +BenchmarkStructVmihailencoMsgpack-4 200000 12814 ns/op 2128 B/op 26 allocs/op +BenchmarkStructUgorjiGoMsgpack-4 100000 17678 ns/op 3616 B/op 70 allocs/op +BenchmarkStructUgorjiGoCodec-4 100000 19053 ns/op 7346 B/op 23 allocs/op +BenchmarkStructJSON-4 20000 69438 ns/op 7864 B/op 26 allocs/op +BenchmarkStructGOB-4 10000 104331 ns/op 14664 B/op 278 allocs/op +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples) to get an idea how to use this package. + +## See also + +- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang message task queue](https://github.com/go-msgqueue/msgqueue) diff --git a/vendor/github.com/vmihailenco/msgpack/appengine.go b/vendor/github.com/vmihailenco/msgpack/appengine.go new file mode 100644 index 0000000000..e8e91e53f3 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/appengine.go @@ -0,0 +1,64 @@ +// +build appengine + +package msgpack + +import ( + "reflect" + + ds "google.golang.org/appengine/datastore" +) + +func init() { + Register((*ds.Key)(nil), encodeDatastoreKeyValue, decodeDatastoreKeyValue) + Register((*ds.Cursor)(nil), encodeDatastoreCursorValue, decodeDatastoreCursorValue) +} + +func EncodeDatastoreKey(e *Encoder, key *ds.Key) error { + if key == nil { + return e.EncodeNil() + } + return e.EncodeString(key.Encode()) +} + +func encodeDatastoreKeyValue(e *Encoder, v reflect.Value) error { + key := v.Interface().(*ds.Key) + return EncodeDatastoreKey(e, key) +} + +func DecodeDatastoreKey(d *Decoder) (*ds.Key, error) { + v, err := d.DecodeString() + if err != nil { + return nil, err + } + if v == "" { + return nil, nil + } + return ds.DecodeKey(v) +} + +func decodeDatastoreKeyValue(d *Decoder, v reflect.Value) error { + key, err := DecodeDatastoreKey(d) + if err != nil { + return err + } + v.Set(reflect.ValueOf(key)) + return nil +} + +func encodeDatastoreCursorValue(e *Encoder, v reflect.Value) error { + cursor := v.Interface().(ds.Cursor) + return e.Encode(cursor.String()) +} + +func decodeDatastoreCursorValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + cursor, err := ds.DecodeCursor(s) + if err != nil { + return err + } + v.Set(reflect.ValueOf(cursor)) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/codes/codes.go b/vendor/github.com/vmihailenco/msgpack/codes/codes.go new file mode 100644 index 0000000000..906af376e0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/codes/codes.go @@ -0,0 +1,86 @@ +package codes + +type Code byte + +var ( + PosFixedNumHigh Code = 0x7f + NegFixedNumLow Code 
= 0xe0 + + Nil Code = 0xc0 + + False Code = 0xc2 + True Code = 0xc3 + + Float Code = 0xca + Double Code = 0xcb + + Uint8 Code = 0xcc + Uint16 Code = 0xcd + Uint32 Code = 0xce + Uint64 Code = 0xcf + + Int8 Code = 0xd0 + Int16 Code = 0xd1 + Int32 Code = 0xd2 + Int64 Code = 0xd3 + + FixedStrLow Code = 0xa0 + FixedStrHigh Code = 0xbf + FixedStrMask Code = 0x1f + Str8 Code = 0xd9 + Str16 Code = 0xda + Str32 Code = 0xdb + + Bin8 Code = 0xc4 + Bin16 Code = 0xc5 + Bin32 Code = 0xc6 + + FixedArrayLow Code = 0x90 + FixedArrayHigh Code = 0x9f + FixedArrayMask Code = 0xf + Array16 Code = 0xdc + Array32 Code = 0xdd + + FixedMapLow Code = 0x80 + FixedMapHigh Code = 0x8f + FixedMapMask Code = 0xf + Map16 Code = 0xde + Map32 Code = 0xdf + + FixExt1 Code = 0xd4 + FixExt2 Code = 0xd5 + FixExt4 Code = 0xd6 + FixExt8 Code = 0xd7 + FixExt16 Code = 0xd8 + Ext8 Code = 0xc7 + Ext16 Code = 0xc8 + Ext32 Code = 0xc9 +) + +func IsFixedNum(c Code) bool { + return c <= PosFixedNumHigh || c >= NegFixedNumLow +} + +func IsFixedMap(c Code) bool { + return c >= FixedMapLow && c <= FixedMapHigh +} + +func IsFixedArray(c Code) bool { + return c >= FixedArrayLow && c <= FixedArrayHigh +} + +func IsFixedString(c Code) bool { + return c >= FixedStrLow && c <= FixedStrHigh +} + +func IsString(c Code) bool { + return IsFixedString(c) || c == Str8 || c == Str16 || c == Str32 +} + +func IsFixedExt(c Code) bool { + return c >= FixExt1 && c <= FixExt16 +} + +func IsExt(c Code) bool { + return IsFixedExt(c) || c == Ext8 || c == Ext16 || c == Ext32 +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode.go b/vendor/github.com/vmihailenco/msgpack/decode.go new file mode 100644 index 0000000000..7414100409 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode.go @@ -0,0 +1,546 @@ +package msgpack + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +const bytesAllocLimit = 1024 * 1024 // 1mb + +type bufReader interface { + io.Reader + io.ByteScanner +} + +func newBufReader(r io.Reader) bufReader { + if br, ok := r.(bufReader); ok { + return br + } + return bufio.NewReader(r) +} + +func makeBuffer() []byte { + return make([]byte, 0, 64) +} + +// Unmarshal decodes the MessagePack-encoded data and stores the result +// in the value pointed to by v. +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +type Decoder struct { + r io.Reader + s io.ByteScanner + buf []byte + + extLen int + rec []byte // accumulates read data if not nil + + useLoose bool + useJSONTag bool + + decodeMapFunc func(*Decoder) (interface{}, error) +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r +// beyond the MessagePack values requested. Buffering can be disabled +// by passing a reader that implements io.ByteScanner interface. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + buf: makeBuffer(), + } + d.resetReader(r) + return d +} + +func (d *Decoder) SetDecodeMapFunc(fn func(*Decoder) (interface{}, error)) { + d.decodeMapFunc = fn +} + +// UseDecodeInterfaceLoose causes decoder to use DecodeInterfaceLoose +// to decode msgpack value into Go interface{}. +func (d *Decoder) UseDecodeInterfaceLoose(flag bool) { + d.useLoose = flag +} + +// UseJSONTag causes the Decoder to use json struct tag as fallback option +// if there is no msgpack tag. 
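+//
+// A minimal usage sketch (the Item type here is illustrative, not part of
+// this package):
+//
+//	type Item struct {
+//		Name string `json:"name"`
+//	}
+//	var item Item
+//	err := msgpack.NewDecoder(r).UseJSONTag(true).Decode(&item)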
+func (d *Decoder) UseJSONTag(v bool) *Decoder { + d.useJSONTag = v + return d +} + +func (d *Decoder) Reset(r io.Reader) error { + d.resetReader(r) + return nil +} + +func (d *Decoder) resetReader(r io.Reader) { + reader := newBufReader(r) + d.r = reader + d.s = reader +} + +func (d *Decoder) Decode(v interface{}) error { + var err error + switch v := v.(type) { + case *string: + if v != nil { + *v, err = d.DecodeString() + return err + } + case *[]byte: + if v != nil { + return d.decodeBytesPtr(v) + } + case *int: + if v != nil { + *v, err = d.DecodeInt() + return err + } + case *int8: + if v != nil { + *v, err = d.DecodeInt8() + return err + } + case *int16: + if v != nil { + *v, err = d.DecodeInt16() + return err + } + case *int32: + if v != nil { + *v, err = d.DecodeInt32() + return err + } + case *int64: + if v != nil { + *v, err = d.DecodeInt64() + return err + } + case *uint: + if v != nil { + *v, err = d.DecodeUint() + return err + } + case *uint8: + if v != nil { + *v, err = d.DecodeUint8() + return err + } + case *uint16: + if v != nil { + *v, err = d.DecodeUint16() + return err + } + case *uint32: + if v != nil { + *v, err = d.DecodeUint32() + return err + } + case *uint64: + if v != nil { + *v, err = d.DecodeUint64() + return err + } + case *bool: + if v != nil { + *v, err = d.DecodeBool() + return err + } + case *float32: + if v != nil { + *v, err = d.DecodeFloat32() + return err + } + case *float64: + if v != nil { + *v, err = d.DecodeFloat64() + return err + } + case *[]string: + return d.decodeStringSlicePtr(v) + case *map[string]string: + return d.decodeMapStringStringPtr(v) + case *map[string]interface{}: + return d.decodeMapStringInterfacePtr(v) + case *time.Duration: + if v != nil { + vv, err := d.DecodeInt64() + *v = time.Duration(vv) + return err + } + case *time.Time: + if v != nil { + *v, err = d.DecodeTime() + return err + } + } + + vv := reflect.ValueOf(v) + if !vv.IsValid() { + return errors.New("msgpack: Decode(nil)") + } + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + vv = vv.Elem() + if !vv.IsValid() { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + return d.DecodeValue(vv) +} + +func (d *Decoder) DecodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := d.Decode(vv); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeInterfaceCond() (interface{}, error) { + if d.useLoose { + return d.DecodeInterfaceLoose() + } + return d.DecodeInterface() +} + +func (d *Decoder) DecodeValue(v reflect.Value) error { + decode := getDecoder(v.Type()) + return decode(d, v) +} + +func (d *Decoder) DecodeNil() error { + c, err := d.readCode() + if err != nil { + return err + } + if c != codes.Nil { + return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) + } + return nil +} + +func (d *Decoder) decodeNilValue(v reflect.Value) error { + err := d.DecodeNil() + if v.IsNil() { + return err + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + v.Set(reflect.Zero(v.Type())) + return err +} + +func (d *Decoder) DecodeBool() (bool, error) { + c, err := d.readCode() + if err != nil { + return false, err + } + return d.bool(c) +} + +func (d *Decoder) bool(c codes.Code) (bool, error) { + if c == codes.False { + return false, nil + } + if c == codes.True { + return true, nil + } + return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) +} + +// DecodeInterface decodes value into interface. 
It returns following types: +// - nil, +// - bool, +// - int8, int16, int32, int64, +// - uint8, uint16, uint32, uint64, +// - float32 and float64, +// - string, +// - []byte, +// - slices of any of the above, +// - maps of any of the above. +// +// DecodeInterface should be used only when you don't know the type of value +// you are decoding. For example, if you are decoding number it is better to use +// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers. +func (d *Decoder) DecodeInterface() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int8(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float: + return d.float32(c) + case codes.Double: + return d.float64(c) + case codes.Uint8: + return d.uint8() + case codes.Uint16: + return d.uint16() + case codes.Uint32: + return d.uint32() + case codes.Uint64: + return d.uint64() + case codes.Int8: + return d.int8() + case codes.Int16: + return d.int16() + case codes.Int32: + return d.int32() + case codes.Int64: + return d.int64() + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// DecodeInterfaceLoose is like DecodeInterface except that: +// - int8, int16, and int32 are converted to int64, +// - uint8, uint16, and uint32 are converted to uint64, +// - float32 is converted to float64. +func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int64(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float, codes.Double: + return d.float64(c) + case codes.Uint8, codes.Uint16, codes.Uint32, codes.Uint64: + return d.uint(c) + case codes.Int8, codes.Int16, codes.Int32, codes.Int64: + return d.int(c) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// Skip skips next value. 
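+//
+// Skip is handy when walking a stream and discarding values that are not
+// needed, e.g. unknown map entries. A hedged sketch (d is a *Decoder):
+//
+//	n, _ := d.DecodeMapLen()
+//	for i := 0; i < n; i++ {
+//		key, _ := d.DecodeString()
+//		if key != "wanted" {
+//			_ = d.Skip() // discard the value for this key
+//			continue
+//		}
+//		// decode the wanted value here
+//	}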
+func (d *Decoder) Skip() error { + c, err := d.readCode() + if err != nil { + return err + } + + if codes.IsFixedNum(c) { + return nil + } else if codes.IsFixedMap(c) { + return d.skipMap(c) + } else if codes.IsFixedArray(c) { + return d.skipSlice(c) + } else if codes.IsFixedString(c) { + return d.skipBytes(c) + } + + switch c { + case codes.Nil, codes.False, codes.True: + return nil + case codes.Uint8, codes.Int8: + return d.skipN(1) + case codes.Uint16, codes.Int16: + return d.skipN(2) + case codes.Uint32, codes.Int32, codes.Float: + return d.skipN(4) + case codes.Uint64, codes.Int64, codes.Double: + return d.skipN(8) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.skipBytes(c) + case codes.Str8, codes.Str16, codes.Str32: + return d.skipBytes(c) + case codes.Array16, codes.Array32: + return d.skipSlice(c) + case codes.Map16, codes.Map32: + return d.skipMap(c) + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.skipExt(c) + } + + return fmt.Errorf("msgpack: unknown code %x", c) +} + +// PeekCode returns the next MessagePack code without advancing the reader. +// Subpackage msgpack/codes contains list of available codes. +func (d *Decoder) PeekCode() (codes.Code, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return codes.Code(c), d.s.UnreadByte() +} + +func (d *Decoder) hasNilCode() bool { + code, err := d.PeekCode() + return err == nil && code == codes.Nil +} + +func (d *Decoder) readCode() (codes.Code, error) { + d.extLen = 0 + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + if d.rec != nil { + d.rec = append(d.rec, c) + } + return codes.Code(c), nil +} + +func (d *Decoder) readFull(b []byte) error { + _, err := io.ReadFull(d.r, b) + if err != nil { + return err + } + if d.rec != nil { + d.rec = append(d.rec, b...) + } + return nil +} + +func (d *Decoder) readN(n int) ([]byte, error) { + buf, err := readN(d.r, d.buf, n) + if err != nil { + return nil, err + } + d.buf = buf + if d.rec != nil { + d.rec = append(d.rec, buf...) + } + return buf, nil +} + +func readN(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + if n <= bytesAllocLimit { + b = make([]byte, n) + } else { + b = make([]byte, bytesAllocLimit) + } + } + + if n <= cap(b) { + b = b[:n] + _, err := io.ReadFull(r, b) + return b, err + } + b = b[:cap(b)] + + var pos int + for { + alloc := n - len(b) + if alloc > bytesAllocLimit { + alloc = bytesAllocLimit + } + b = append(b, make([]byte, alloc)...) 
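+		// grow by at most bytesAllocLimit per pass so a huge declared
+		// length cannot force one giant up-front allocation; ReadFull
+		// below fills b[pos:], i.e. everything not yet read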
+ + _, err := io.ReadFull(r, b[pos:]) + if err != nil { + return nil, err + } + + if len(b) == n { + break + } + pos = len(b) + } + + return b, nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_map.go b/vendor/github.com/vmihailenco/msgpack/decode_map.go new file mode 100644 index 0000000000..b542a7541f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_map.go @@ -0,0 +1,338 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +const mapElemsAllocLimit = 1e4 + +var mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) +var mapStringStringType = mapStringStringPtrType.Elem() + +var mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) +var mapStringInterfaceType = mapStringInterfacePtrType.Elem() + +var errInvalidCode = errors.New("invalid code") + +func decodeMapValue(d *Decoder, v reflect.Value) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + + typ := v.Type() + if size == -1 { + v.Set(reflect.Zero(typ)) + return nil + } + + if v.IsNil() { + v.Set(reflect.MakeMap(typ)) + } + if size == 0 { + return nil + } + + return decodeMapValueSize(d, v, size) +} + +func decodeMapValueSize(d *Decoder, v reflect.Value, size int) error { + typ := v.Type() + keyType := typ.Key() + valueType := typ.Elem() + + for i := 0; i < size; i++ { + mk := reflect.New(keyType).Elem() + if err := d.DecodeValue(mk); err != nil { + return err + } + + mv := reflect.New(valueType).Elem() + if err := d.DecodeValue(mv); err != nil { + return err + } + + v.SetMapIndex(mk, mv) + } + + return nil +} + +// DecodeMapLen decodes map length. Length is -1 when map is nil. +func (d *Decoder) DecodeMapLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + + if codes.IsExt(c) { + if err = d.skipExtHeader(c); err != nil { + return 0, err + } + + c, err = d.readCode() + if err != nil { + return 0, err + } + } + return d.mapLen(c) +} + +func (d *Decoder) mapLen(c codes.Code) (int, error) { + size, err := d._mapLen(c) + err = expandInvalidCodeMapLenError(c, err) + return size, err +} + +func (d *Decoder) _mapLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } + if c >= codes.FixedMapLow && c <= codes.FixedMapHigh { + return int(c & codes.FixedMapMask), nil + } + if c == codes.Map16 { + size, err := d.uint16() + return int(size), err + } + if c == codes.Map32 { + size, err := d.uint32() + return int(size), err + } + return 0, errInvalidCode +} + +func expandInvalidCodeMapLenError(c codes.Code, err error) error { + if err == errInvalidCode { + return fmt.Errorf("msgpack: invalid code=%x decoding map length", c) + } + return err +} + +func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { + mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) + return d.decodeMapStringStringPtr(mptr) +} + +func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + if size == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]string, min(size, mapElemsAllocLimit)) + m = *ptr + } + + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.DecodeString() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func decodeMapStringInterfaceValue(d *Decoder, v 
reflect.Value) error { + ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) + return d.decodeMapStringInterfacePtr(ptr) +} + +func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]interface{}, min(n, mapElemsAllocLimit)) + m = *ptr + } + + for i := 0; i < n; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func (d *Decoder) DecodeMap() (interface{}, error) { + if d.decodeMapFunc != nil { + return d.decodeMapFunc(d) + } + + size, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + if size == -1 { + return nil, nil + } + if size == 0 { + return make(map[string]interface{}), nil + } + + code, err := d.PeekCode() + if err != nil { + return nil, err + } + + if codes.IsString(code) { + return d.decodeMapStringInterfaceSize(size) + } + + key, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + value, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + keyType := reflect.TypeOf(key) + valueType := reflect.TypeOf(value) + mapType := reflect.MapOf(keyType, valueType) + mapValue := reflect.MakeMap(mapType) + + mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + size-- + + err = decodeMapValueSize(d, mapValue, size) + if err != nil { + return nil, err + } + + return mapValue.Interface(), nil +} + +func (d *Decoder) decodeMapStringInterfaceSize(size int) (map[string]interface{}, error) { + m := make(map[string]interface{}, min(size, mapElemsAllocLimit)) + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return nil, err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + m[mk] = mv + } + return m, nil +} + +func (d *Decoder) skipMap(c codes.Code) error { + n, err := d.mapLen(c) + if err != nil { + return err + } + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + if err := d.Skip(); err != nil { + return err + } + } + return nil +} + +func decodeStructValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + var isArray bool + + n, err := d._mapLen(c) + if err != nil { + var err2 error + n, err2 = d.arrayLen(c) + if err2 != nil { + return expandInvalidCodeMapLenError(c, err) + } + isArray = true + } + if n == -1 { + if err = mustSet(v); err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil + } + + var fields *fields + if d.useJSONTag { + fields = jsonStructs.Fields(v.Type()) + } else { + fields = structs.Fields(v.Type()) + } + + if isArray { + for i, f := range fields.List { + if i >= n { + break + } + if err := f.DecodeValue(d, v); err != nil { + return err + } + } + // Skip extra values. 
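+		// (an array-encoded struct may carry more elements than this
+		// struct has fields, e.g. data written by a newer schema)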
+ for i := len(fields.List); i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil + } + + for i := 0; i < n; i++ { + name, err := d.DecodeString() + if err != nil { + return err + } + if f := fields.Table[name]; f != nil { + if err := f.DecodeValue(d, v); err != nil { + return err + } + } else { + if err := d.Skip(); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_number.go b/vendor/github.com/vmihailenco/msgpack/decode_number.go new file mode 100644 index 0000000000..15019cc97a --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_number.go @@ -0,0 +1,307 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func (d *Decoder) skipN(n int) error { + _, err := d.readN(n) + return err +} + +func (d *Decoder) uint8() (uint8, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return uint8(c), nil +} + +func (d *Decoder) int8() (int8, error) { + n, err := d.uint8() + return int8(n), err +} + +func (d *Decoder) uint16() (uint16, error) { + b, err := d.readN(2) + if err != nil { + return 0, err + } + return (uint16(b[0]) << 8) | uint16(b[1]), nil +} + +func (d *Decoder) int16() (int16, error) { + n, err := d.uint16() + return int16(n), err +} + +func (d *Decoder) uint32() (uint32, error) { + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := (uint32(b[0]) << 24) | + (uint32(b[1]) << 16) | + (uint32(b[2]) << 8) | + uint32(b[3]) + return n, nil +} + +func (d *Decoder) int32() (int32, error) { + n, err := d.uint32() + return int32(n), err +} + +func (d *Decoder) uint64() (uint64, error) { + b, err := d.readN(8) + if err != nil { + return 0, err + } + n := (uint64(b[0]) << 56) | + (uint64(b[1]) << 48) | + (uint64(b[2]) << 40) | + (uint64(b[3]) << 32) | + (uint64(b[4]) << 24) | + (uint64(b[5]) << 16) | + (uint64(b[6]) << 8) | + uint64(b[7]) + return n, nil +} + +func (d *Decoder) int64() (int64, error) { + n, err := d.uint64() + return int64(n), err +} + +// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go uint64. +func (d *Decoder) DecodeUint64() (uint64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.uint(c) +} + +func (d *Decoder) uint(c codes.Code) (uint64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return uint64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return uint64(n), err + case codes.Int8: + n, err := d.int8() + return uint64(n), err + case codes.Uint16: + n, err := d.uint16() + return uint64(n), err + case codes.Int16: + n, err := d.int16() + return uint64(n), err + case codes.Uint32: + n, err := d.uint32() + return uint64(n), err + case codes.Int32: + n, err := d.int32() + return uint64(n), err + case codes.Uint64, codes.Int64: + return d.uint64() + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) +} + +// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go int64. 
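+//
+// The conversion is not range-checked: a msgpack uint64 larger than
+// math.MaxInt64 wraps around to a negative int64.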
+func (d *Decoder) DecodeInt64() (int64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.int(c) +} + +func (d *Decoder) int(c codes.Code) (int64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return int64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return int64(n), err + case codes.Int8: + n, err := d.uint8() + return int64(int8(n)), err + case codes.Uint16: + n, err := d.uint16() + return int64(n), err + case codes.Int16: + n, err := d.uint16() + return int64(int16(n)), err + case codes.Uint32: + n, err := d.uint32() + return int64(n), err + case codes.Int32: + n, err := d.uint32() + return int64(int32(n)), err + case codes.Uint64, codes.Int64: + n, err := d.uint64() + return int64(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) +} + +func (d *Decoder) DecodeFloat32() (float32, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float32(c) +} + +func (d *Decoder) float32(c codes.Code) (float32, error) { + if c == codes.Float { + n, err := d.uint32() + if err != nil { + return 0, err + } + return math.Float32frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float32(n), nil +} + +// DecodeFloat64 decodes msgpack float32/64 into Go float64. +func (d *Decoder) DecodeFloat64() (float64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float64(c) +} + +func (d *Decoder) float64(c codes.Code) (float64, error) { + switch c { + case codes.Float: + n, err := d.float32(c) + if err != nil { + return 0, err + } + return float64(n), nil + case codes.Double: + n, err := d.uint64() + if err != nil { + return 0, err + } + return math.Float64frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float64(n), nil +} + +func (d *Decoder) DecodeUint() (uint, error) { + n, err := d.DecodeUint64() + return uint(n), err +} + +func (d *Decoder) DecodeUint8() (uint8, error) { + n, err := d.DecodeUint64() + return uint8(n), err +} + +func (d *Decoder) DecodeUint16() (uint16, error) { + n, err := d.DecodeUint64() + return uint16(n), err +} + +func (d *Decoder) DecodeUint32() (uint32, error) { + n, err := d.DecodeUint64() + return uint32(n), err +} + +func (d *Decoder) DecodeInt() (int, error) { + n, err := d.DecodeInt64() + return int(n), err +} + +func (d *Decoder) DecodeInt8() (int8, error) { + n, err := d.DecodeInt64() + return int8(n), err +} + +func (d *Decoder) DecodeInt16() (int16, error) { + n, err := d.DecodeInt64() + return int16(n), err +} + +func (d *Decoder) DecodeInt32() (int32, error) { + n, err := d.DecodeInt64() + return int32(n), err +} + +func decodeFloat32Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat32() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(float64(f)) + return nil +} + +func decodeFloat64Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(f) + return nil +} + +func decodeInt64Value(d *Decoder, v reflect.Value) error { + n, err := d.DecodeInt64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetInt(n) + return nil +} + +func decodeUint64Value(d *Decoder, v 
reflect.Value) error { + n, err := d.DecodeUint64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetUint(n) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_query.go b/vendor/github.com/vmihailenco/msgpack/decode_query.go new file mode 100644 index 0000000000..d680be80c0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_query.go @@ -0,0 +1,158 @@ +package msgpack + +import ( + "fmt" + "strconv" + "strings" + + "github.com/vmihailenco/msgpack/codes" +) + +type queryResult struct { + query string + key string + hasAsterisk bool + + values []interface{} +} + +func (q *queryResult) nextKey() { + ind := strings.IndexByte(q.query, '.') + if ind == -1 { + q.key = q.query + q.query = "" + return + } + q.key = q.query[:ind] + q.query = q.query[ind+1:] +} + +// Query extracts data specified by the query from the msgpack stream skipping +// any other data. Query consists of map keys and array indexes separated with dot, +// e.g. key1.0.key2. +func (d *Decoder) Query(query string) ([]interface{}, error) { + res := queryResult{ + query: query, + } + if err := d.query(&res); err != nil { + return nil, err + } + return res.values, nil +} + +func (d *Decoder) query(q *queryResult) error { + q.nextKey() + if q.key == "" { + v, err := d.decodeInterfaceCond() + if err != nil { + return err + } + q.values = append(q.values, v) + return nil + } + + code, err := d.PeekCode() + if err != nil { + return err + } + + switch { + case code == codes.Map16 || code == codes.Map32 || codes.IsFixedMap(code): + err = d.queryMapKey(q) + case code == codes.Array16 || code == codes.Array32 || codes.IsFixedArray(code): + err = d.queryArrayIndex(q) + default: + err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) + } + return err +} + +func (d *Decoder) queryMapKey(q *queryResult) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + for i := 0; i < n; i++ { + k, err := d.bytesNoCopy() + if err != nil { + return err + } + + if string(k) == q.key { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext((n - i - 1) * 2) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) queryArrayIndex(q *queryResult) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + if q.key == "*" { + q.hasAsterisk = true + + query := q.query + for i := 0; i < n; i++ { + q.query = query + if err := d.query(q); err != nil { + return err + } + } + + q.hasAsterisk = false + return nil + } + + ind, err := strconv.Atoi(q.key) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == ind { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext(n - i - 1) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) skipNext(n int) error { + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/decode_slice.go new file mode 100644 index 0000000000..778d80b937 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_slice.go @@ -0,0 +1,193 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +const sliceElemsAllocLimit = 1e4 + +var 
sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) + +// DecodeArrayLen decodes array length. Length is -1 when array is nil. +func (d *Decoder) DecodeArrayLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.arrayLen(c) +} + +func (d *Decoder) arrayLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if c >= codes.FixedArrayLow && c <= codes.FixedArrayHigh { + return int(c & codes.FixedArrayMask), nil + } + switch c { + case codes.Array16: + n, err := d.uint16() + return int(n), err + case codes.Array32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c) +} + +func decodeStringSliceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string) + return d.decodeStringSlicePtr(ptr) +} + +func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + ss := setStringsCap(*ptr, n) + for i := 0; i < n; i++ { + s, err := d.DecodeString() + if err != nil { + return err + } + ss = append(ss, s) + } + *ptr = ss + + return nil +} + +func setStringsCap(s []string, n int) []string { + if n > sliceElemsAllocLimit { + n = sliceElemsAllocLimit + } + + if s == nil { + return make([]string, 0, n) + } + + if cap(s) >= n { + return s[:0] + } + + s = s[:cap(s)] + s = append(s, make([]string, n-len(s))...) + return s[:0] +} + +func decodeSliceValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + if n == 0 && v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + return nil + } + + if v.Cap() >= n { + v.Set(v.Slice(0, n)) + } else if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Cap())) + } + + for i := 0; i < n; i++ { + if i >= v.Len() { + v.Set(growSliceValue(v, n)) + } + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func growSliceValue(v reflect.Value, n int) reflect.Value { + diff := n - v.Len() + if diff > sliceElemsAllocLimit { + diff = sliceElemsAllocLimit + } + v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff)) + return v +} + +func decodeArrayValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + return nil + } + + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + for i := 0; i < n; i++ { + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) DecodeSlice() ([]interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.decodeSlice(c) +} + +func (d *Decoder) decodeSlice(c codes.Code) ([]interface{}, error) { + n, err := d.arrayLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + + s := make([]interface{}, 0, min(n, sliceElemsAllocLimit)) + for i := 0; i < n; i++ { + v, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + s = append(s, v) + } + + return s, nil +} + +func (d *Decoder) skipSlice(c codes.Code) error { + n, err := d.arrayLen(c) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git 
a/vendor/github.com/vmihailenco/msgpack/decode_string.go b/vendor/github.com/vmihailenco/msgpack/decode_string.go new file mode 100644 index 0000000000..5402022ee9 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_string.go @@ -0,0 +1,175 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func (d *Decoder) bytesLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if codes.IsFixedString(c) { + return int(c & codes.FixedStrMask), nil + } + switch c { + case codes.Str8, codes.Bin8: + n, err := d.uint8() + return int(n), err + case codes.Str16, codes.Bin16: + n, err := d.uint16() + return int(n), err + case codes.Str32, codes.Bin32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding bytes length", c) +} + +func (d *Decoder) DecodeString() (string, error) { + c, err := d.readCode() + if err != nil { + return "", err + } + return d.string(c) +} + +func (d *Decoder) string(c codes.Code) (string, error) { + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + if n == -1 { + return "", nil + } + b, err := d.readN(n) + return string(b), err +} + +func decodeStringValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetString(s) + return nil +} + +func (d *Decoder) DecodeBytesLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.bytesLen(c) +} + +func (d *Decoder) DecodeBytes() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.bytes(c, nil) +} + +func (d *Decoder) bytes(c codes.Code, b []byte) ([]byte, error) { + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return readN(d.r, b, n) +} + +func (d *Decoder) bytesNoCopy() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return d.readN(n) +} + +func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { + c, err := d.readCode() + if err != nil { + return err + } + return d.bytesPtr(c, ptr) +} + +func (d *Decoder) bytesPtr(c codes.Code, ptr *[]byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + *ptr, err = readN(d.r, *ptr, n) + return err +} + +func (d *Decoder) skipBytes(c codes.Code) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + return d.skipN(n) +} + +func decodeBytesValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + b, err := d.bytes(c, v.Bytes()) + if err != nil { + return err + } + + if err = mustSet(v); err != nil { + return err + } + v.SetBytes(b) + + return nil +} + +func decodeByteArrayValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + b := v.Slice(0, n).Bytes() + return d.readFull(b) +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_value.go b/vendor/github.com/vmihailenco/msgpack/decode_value.go new file mode 100644 index 0000000000..7b858b5fc5 --- /dev/null 
+++ b/vendor/github.com/vmihailenco/msgpack/decode_value.go @@ -0,0 +1,234 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" +) + +var interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() +var stringType = reflect.TypeOf((*string)(nil)).Elem() + +var valueDecoders []decoderFunc + +func init() { + valueDecoders = []decoderFunc{ + reflect.Bool: decodeBoolValue, + reflect.Int: decodeInt64Value, + reflect.Int8: decodeInt64Value, + reflect.Int16: decodeInt64Value, + reflect.Int32: decodeInt64Value, + reflect.Int64: decodeInt64Value, + reflect.Uint: decodeUint64Value, + reflect.Uint8: decodeUint64Value, + reflect.Uint16: decodeUint64Value, + reflect.Uint32: decodeUint64Value, + reflect.Uint64: decodeUint64Value, + reflect.Float32: decodeFloat32Value, + reflect.Float64: decodeFloat64Value, + reflect.Complex64: decodeUnsupportedValue, + reflect.Complex128: decodeUnsupportedValue, + reflect.Array: decodeArrayValue, + reflect.Chan: decodeUnsupportedValue, + reflect.Func: decodeUnsupportedValue, + reflect.Interface: decodeInterfaceValue, + reflect.Map: decodeMapValue, + reflect.Ptr: decodeUnsupportedValue, + reflect.Slice: decodeSliceValue, + reflect.String: decodeStringValue, + reflect.Struct: decodeStructValue, + reflect.UnsafePointer: decodeUnsupportedValue, + } +} + +func mustSet(v reflect.Value) error { + if !v.CanSet() { + return fmt.Errorf("msgpack: Decode(nonsettable %s)", v.Type()) + } + return nil +} + +func getDecoder(typ reflect.Type) decoderFunc { + kind := typ.Kind() + + decoder, ok := typDecMap[typ] + if ok { + return decoder + } + + if typ.Implements(customDecoderType) { + return decodeCustomValue + } + if typ.Implements(unmarshalerType) { + return unmarshalValue + } + + // Addressable struct field value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customDecoderType) { + return decodeCustomValueAddr + } + if ptr.Implements(unmarshalerType) { + return unmarshalValueAddr + } + } + + switch kind { + case reflect.Ptr: + return ptrDecoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + switch elem.Kind() { + case reflect.Uint8: + return decodeBytesValue + } + switch elem { + case stringType: + return decodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return decodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return decodeMapStringStringValue + case interfaceType: + return decodeMapStringInterfaceValue + } + } + } + return valueDecoders[kind] +} + +func ptrDecoderFunc(typ reflect.Type) decoderFunc { + decoder := getDecoder(typ.Elem()) + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + if err := mustSet(v); err != nil { + return err + } + if !v.IsNil() { + v.Set(reflect.Zero(v.Type())) + } + return d.DecodeNil() + } + if v.IsNil() { + if err := mustSet(v); err != nil { + return err + } + v.Set(reflect.New(v.Type().Elem())) + } + return decoder(d, v.Elem()) + } +} + +func decodeCustomValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return decodeCustomValue(d, v.Addr()) +} + +func decodeCustomValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + decoder := v.Interface().(CustomDecoder) + return decoder.DecodeMsgpack(d) +} + +func unmarshalValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + 
return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return unmarshalValue(d, v.Addr()) +} + +func unmarshalValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + if d.extLen != 0 { + b, err := d.readN(d.extLen) + if err != nil { + return err + } + d.rec = b + } else { + d.rec = makeBuffer() + if err := d.Skip(); err != nil { + return err + } + } + + unmarshaler := v.Interface().(Unmarshaler) + err := unmarshaler.UnmarshalMsgpack(d.rec) + d.rec = nil + return err +} + +func decodeBoolValue(d *Decoder, v reflect.Value) error { + flag, err := d.DecodeBool() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetBool(flag) + return nil +} + +func decodeInterfaceValue(d *Decoder, v reflect.Value) error { + if v.IsNil() { + return d.interfaceValue(v) + } + + elem := v.Elem() + if !elem.CanAddr() { + if d.hasNilCode() { + v.Set(reflect.Zero(v.Type())) + return d.DecodeNil() + } + } + + return d.DecodeValue(elem) +} + +func (d *Decoder) interfaceValue(v reflect.Value) error { + vv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + + if vv != nil { + if v.Type() == errorType { + if vv, ok := vv.(string); ok { + v.Set(reflect.ValueOf(errors.New(vv))) + return nil + } + } + + v.Set(reflect.ValueOf(vv)) + } + + return nil +} + +func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode.go b/vendor/github.com/vmihailenco/msgpack/encode.go new file mode 100644 index 0000000000..c2bb23cde9 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode.go @@ -0,0 +1,170 @@ +package msgpack + +import ( + "bytes" + "io" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +type writer interface { + io.Writer + WriteByte(byte) error + WriteString(string) (int, error) +} + +type byteWriter struct { + io.Writer + + buf []byte + bootstrap [64]byte +} + +func newByteWriter(w io.Writer) *byteWriter { + bw := &byteWriter{ + Writer: w, + } + bw.buf = bw.bootstrap[:] + return bw +} + +func (w *byteWriter) WriteByte(c byte) error { + w.buf = w.buf[:1] + w.buf[0] = c + _, err := w.Write(w.buf) + return err +} + +func (w *byteWriter) WriteString(s string) (int, error) { + w.buf = append(w.buf[:0], s...) + return w.Write(w.buf) +} + +// Marshal returns the MessagePack encoding of v. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + err := NewEncoder(&buf).Encode(v) + return buf.Bytes(), err +} + +type Encoder struct { + w writer + buf []byte + + sortMapKeys bool + structAsArray bool + useJSONTag bool + useCompact bool +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + bw, ok := w.(writer) + if !ok { + bw = newByteWriter(w) + } + return &Encoder{ + w: bw, + buf: make([]byte, 9), + } +} + +// SortMapKeys causes the Encoder to encode map keys in increasing order. +// Supported map types are: +// - map[string]string +// - map[string]interface{} +func (e *Encoder) SortMapKeys(flag bool) *Encoder { + e.sortMapKeys = flag + return e +} + +// StructAsArray causes the Encoder to encode Go structs as MessagePack arrays. +func (e *Encoder) StructAsArray(flag bool) *Encoder { + e.structAsArray = flag + return e +} + +// UseJSONTag causes the Encoder to use json struct tag as fallback option +// if there is no msgpack tag. 
+func (e *Encoder) UseJSONTag(flag bool) *Encoder {
+	e.useJSONTag = flag
+	return e
+}
+
+// UseCompactEncoding causes the Encoder to choose the most compact encoding.
+// For example, it allows encoding a Go int64 as a msgpack int8, saving 7 bytes.
+func (e *Encoder) UseCompactEncoding(flag bool) *Encoder {
+	e.useCompact = flag
+	return e
+}
+
+func (e *Encoder) Encode(v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return e.EncodeNil()
+	case string:
+		return e.EncodeString(v)
+	case []byte:
+		return e.EncodeBytes(v)
+	case int:
+		return e.encodeInt64Cond(int64(v))
+	case int64:
+		return e.encodeInt64Cond(v)
+	case uint:
+		return e.encodeUint64Cond(uint64(v))
+	case uint64:
+		return e.encodeUint64Cond(v)
+	case bool:
+		return e.EncodeBool(v)
+	case float32:
+		return e.EncodeFloat32(v)
+	case float64:
+		return e.EncodeFloat64(v)
+	case time.Duration:
+		return e.encodeInt64Cond(int64(v))
+	case time.Time:
+		return e.EncodeTime(v)
+	}
+	return e.EncodeValue(reflect.ValueOf(v))
+}
+
+func (e *Encoder) EncodeMulti(v ...interface{}) error {
+	for _, vv := range v {
+		if err := e.Encode(vv); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *Encoder) EncodeValue(v reflect.Value) error {
+	fn := getEncoder(v.Type())
+	return fn(e, v)
+}
+
+func (e *Encoder) EncodeNil() error {
+	return e.writeCode(codes.Nil)
+}
+
+func (e *Encoder) EncodeBool(value bool) error {
+	if value {
+		return e.writeCode(codes.True)
+	}
+	return e.writeCode(codes.False)
+}
+
+func (e *Encoder) writeCode(c codes.Code) error {
+	return e.w.WriteByte(byte(c))
+}
+
+func (e *Encoder) write(b []byte) error {
+	_, err := e.w.Write(b)
+	return err
+}
+
+func (e *Encoder) writeString(s string) error {
+	_, err := e.w.WriteString(s)
+	return err
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/encode_map.go b/vendor/github.com/vmihailenco/msgpack/encode_map.go
new file mode 100644
index 0000000000..a87c4075fe
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/encode_map.go
@@ -0,0 +1,172 @@
+package msgpack
+
+import (
+	"reflect"
+	"sort"
+
+	"github.com/vmihailenco/msgpack/codes"
+)
+
+func encodeMapValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	for _, key := range v.MapKeys() {
+		if err := e.EncodeValue(key); err != nil {
+			return err
+		}
+		if err := e.EncodeValue(v.MapIndex(key)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeMapStringStringValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	m := v.Convert(mapStringStringType).Interface().(map[string]string)
+	if e.sortMapKeys {
+		return e.encodeSortedMapStringString(m)
+	}
+
+	for mk, mv := range m {
+		if err := e.EncodeString(mk); err != nil {
+			return err
+		}
+		if err := e.EncodeString(mv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{})
+	if e.sortMapKeys {
+		return e.encodeSortedMapStringInterface(m)
+	}
+
+	for mk, mv := range m {
+		if err := e.EncodeString(mk); err != nil {
+			return err
+		}
+		if err := e.Encode(mv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
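+// Usage sketch (illustrative, not part of the upstream file): with
+// SortMapKeys enabled the wire output is deterministic, which matters when
+// encoded bytes are compared or hashed.
+//
+//	var buf bytes.Buffer
+//	enc := msgpack.NewEncoder(&buf).SortMapKeys(true)
+//	err := enc.Encode(map[string]string{"b": "2", "a": "1"}) // "a" written first
+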
+func (e *Encoder) encodeSortedMapStringString(m map[string]string) error {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		err := e.EncodeString(k)
+		if err != nil {
+			return err
+		}
+		if err = e.EncodeString(m[k]); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeSortedMapStringInterface(m map[string]interface{}) error {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		err := e.EncodeString(k)
+		if err != nil {
+			return err
+		}
+		if err = e.Encode(m[k]); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) EncodeMapLen(l int) error {
+	if l < 16 {
+		return e.writeCode(codes.FixedMapLow | codes.Code(l))
+	}
+	if l < 65536 {
+		return e.write2(codes.Map16, uint16(l))
+	}
+	return e.write4(codes.Map32, uint32(l))
+}
+
+func encodeStructValue(e *Encoder, strct reflect.Value) error {
+	var structFields *fields
+	if e.useJSONTag {
+		structFields = jsonStructs.Fields(strct.Type())
+	} else {
+		structFields = structs.Fields(strct.Type())
+	}
+
+	if e.structAsArray || structFields.AsArray {
+		return encodeStructValueAsArray(e, strct, structFields.List)
+	}
+	fields := structFields.OmitEmpty(strct)
+
+	if err := e.EncodeMapLen(len(fields)); err != nil {
+		return err
+	}
+
+	for _, f := range fields {
+		if err := e.EncodeString(f.name); err != nil {
+			return err
+		}
+		if err := f.EncodeValue(e, strct); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error {
+	if err := e.EncodeArrayLen(len(fields)); err != nil {
+		return err
+	}
+	for _, f := range fields {
+		if err := f.EncodeValue(e, strct); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/encode_number.go b/vendor/github.com/vmihailenco/msgpack/encode_number.go
new file mode 100644
index 0000000000..dd7db6fdd8
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/encode_number.go
@@ -0,0 +1,230 @@
+package msgpack
+
+import (
+	"math"
+	"reflect"
+
+	"github.com/vmihailenco/msgpack/codes"
+)
+
+// EncodeUint8 encodes a uint8 in 2 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint8(n uint8) error {
+	return e.write1(codes.Uint8, n)
+}
+
+func (e *Encoder) encodeUint8Cond(n uint8) error {
+	if e.useCompact {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint8(n)
+}
+
+// EncodeUint16 encodes a uint16 in 3 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint16(n uint16) error {
+	return e.write2(codes.Uint16, n)
+}
+
+func (e *Encoder) encodeUint16Cond(n uint16) error {
+	if e.useCompact {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint16(n)
+}
+
+// EncodeUint32 encodes a uint32 in 5 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint32(n uint32) error {
+	return e.write4(codes.Uint32, n)
+}
+
+func (e *Encoder) encodeUint32Cond(n uint32) error {
+	if e.useCompact {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint32(n)
+}
+
+// EncodeUint64 encodes a uint64 in 9 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint64(n uint64) error {
+	return e.write8(codes.Uint64, n)
+}
+
+func (e *Encoder) encodeUint64Cond(n uint64) error {
+	if e.useCompact {
+		return e.EncodeUint(n)
+	}
+	return e.EncodeUint64(n)
+}
+
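+// Wire-format sketch (byte values follow the MessagePack spec):
+//
+//	e.EncodeUint8(200) // writes 0xcc 0xc8: width and type preserved
+//	e.EncodeUint(200)  // writes 0xcc 0xc8: smallest encoding that fits
+//	e.EncodeUint(7)    // writes 0x07: positive fixint, a single byte
+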
+// EncodeInt8 encodes an int8 in 2 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt8(n int8) error {
+	return e.write1(codes.Int8, uint8(n))
+}
+
+func (e *Encoder) encodeInt8Cond(n int8) error {
+	if e.useCompact {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt8(n)
+}
+
+// EncodeInt16 encodes an int16 in 3 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt16(n int16) error {
+	return e.write2(codes.Int16, uint16(n))
+}
+
+func (e *Encoder) encodeInt16Cond(n int16) error {
+	if e.useCompact {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt16(n)
+}
+
+// EncodeInt32 encodes an int32 in 5 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt32(n int32) error {
+	return e.write4(codes.Int32, uint32(n))
+}
+
+func (e *Encoder) encodeInt32Cond(n int32) error {
+	if e.useCompact {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt32(n)
+}
+
+// EncodeInt64 encodes an int64 in 9 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt64(n int64) error {
+	return e.write8(codes.Int64, uint64(n))
+}
+
+func (e *Encoder) encodeInt64Cond(n int64) error {
+	if e.useCompact {
+		return e.EncodeInt(n)
+	}
+	return e.EncodeInt64(n)
+}
+
+// EncodeUint encodes a uint64 in 1, 2, 3, 5, or 9 bytes.
+// The type of the number is lost during encoding.
+func (e *Encoder) EncodeUint(n uint64) error {
+	if n <= math.MaxInt8 {
+		return e.w.WriteByte(byte(n))
+	}
+	if n <= math.MaxUint8 {
+		return e.EncodeUint8(uint8(n))
+	}
+	if n <= math.MaxUint16 {
+		return e.EncodeUint16(uint16(n))
+	}
+	if n <= math.MaxUint32 {
+		return e.EncodeUint32(uint32(n))
+	}
+	return e.EncodeUint64(n)
+}
+
+// EncodeInt encodes an int64 in 1, 2, 3, 5, or 9 bytes.
+// The type of the number is lost during encoding.
+func (e *Encoder) EncodeInt(n int64) error {
+	if n >= 0 {
+		return e.EncodeUint(uint64(n))
+	}
+	if n >= int64(int8(codes.NegFixedNumLow)) {
+		return e.w.WriteByte(byte(n))
+	}
+	if n >= math.MinInt8 {
+		return e.EncodeInt8(int8(n))
+	}
+	if n >= math.MinInt16 {
+		return e.EncodeInt16(int16(n))
+	}
+	if n >= math.MinInt32 {
+		return e.EncodeInt32(int32(n))
+	}
+	return e.EncodeInt64(n)
+}
+
+func (e *Encoder) EncodeFloat32(n float32) error {
+	return e.write4(codes.Float, math.Float32bits(n))
+}
+
+func (e *Encoder) EncodeFloat64(n float64) error {
+	return e.write8(codes.Double, math.Float64bits(n))
+}
+
+func (e *Encoder) write1(code codes.Code, n uint8) error {
+	e.buf = e.buf[:2]
+	e.buf[0] = byte(code)
+	e.buf[1] = byte(n)
+	return e.write(e.buf)
+}
+
+func (e *Encoder) write2(code codes.Code, n uint16) error {
+	e.buf = e.buf[:3]
+	e.buf[0] = byte(code)
+	e.buf[1] = byte(n >> 8)
+	e.buf[2] = byte(n)
+	return e.write(e.buf)
+}
+
+func (e *Encoder) write4(code codes.Code, n uint32) error {
+	e.buf = e.buf[:5]
+	e.buf[0] = byte(code)
+	e.buf[1] = byte(n >> 24)
+	e.buf[2] = byte(n >> 16)
+	e.buf[3] = byte(n >> 8)
+	e.buf[4] = byte(n)
+	return e.write(e.buf)
+}
+
+func (e *Encoder) write8(code codes.Code, n uint64) error {
+	e.buf = e.buf[:9]
+	e.buf[0] = byte(code)
+	e.buf[1] = byte(n >> 56)
+	e.buf[2] = byte(n >> 48)
+	e.buf[3] = byte(n >> 40)
+	e.buf[4] = byte(n >> 32)
+	e.buf[5] = byte(n >> 24)
+	e.buf[6] = byte(n >> 16)
+	e.buf[7] = byte(n >> 8)
+	e.buf[8] = byte(n)
+	return e.write(e.buf)
+}
+
+func encodeUint8CondValue(e *Encoder, v reflect.Value) error {
+	return e.encodeUint8Cond(uint8(v.Uint()))
+}
+
+func encodeUint16CondValue(e *Encoder, v reflect.Value) error {
+	return e.encodeUint16Cond(uint16(v.Uint()))
+}
+
+func encodeUint32CondValue(e *Encoder, v reflect.Value) error {
+	return 
e.encodeUint32Cond(uint32(v.Uint())) +} + +func encodeUint64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint64Cond(v.Uint()) +} + +func encodeInt8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt8Cond(int8(v.Int())) +} + +func encodeInt16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt16Cond(int16(v.Int())) +} + +func encodeInt32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt32Cond(int32(v.Int())) +} + +func encodeInt64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt64Cond(v.Int()) +} + +func encodeFloat32Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat32(float32(v.Float())) +} + +func encodeFloat64Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat64(v.Float()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/encode_slice.go new file mode 100644 index 0000000000..5ddbd63117 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_slice.go @@ -0,0 +1,124 @@ +package msgpack + +import ( + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func encodeStringValue(e *Encoder, v reflect.Value) error { + return e.EncodeString(v.String()) +} + +func encodeByteSliceValue(e *Encoder, v reflect.Value) error { + return e.EncodeBytes(v.Bytes()) +} + +func encodeByteArrayValue(e *Encoder, v reflect.Value) error { + if err := e.EncodeBytesLen(v.Len()); err != nil { + return err + } + + if v.CanAddr() { + b := v.Slice(0, v.Len()).Bytes() + return e.write(b) + } + + e.buf = grow(e.buf, v.Len()) + reflect.Copy(reflect.ValueOf(e.buf), v) + return e.write(e.buf) +} + +func grow(b []byte, n int) []byte { + if cap(b) >= n { + return b[:n] + } + b = b[:cap(b)] + b = append(b, make([]byte, n-len(b))...) 
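+	// b now has length exactly n; bytes appended beyond the old length are zeroed.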
+ return b +} + +func (e *Encoder) EncodeBytesLen(l int) error { + if l < 256 { + return e.write1(codes.Bin8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Bin16, uint16(l)) + } + return e.write4(codes.Bin32, uint32(l)) +} + +func (e *Encoder) encodeStrLen(l int) error { + if l < 32 { + return e.writeCode(codes.FixedStrLow | codes.Code(l)) + } + if l < 256 { + return e.write1(codes.Str8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Str16, uint16(l)) + } + return e.write4(codes.Str32, uint32(l)) +} + +func (e *Encoder) EncodeString(v string) error { + if err := e.encodeStrLen(len(v)); err != nil { + return err + } + return e.writeString(v) +} + +func (e *Encoder) EncodeBytes(v []byte) error { + if v == nil { + return e.EncodeNil() + } + if err := e.EncodeBytesLen(len(v)); err != nil { + return err + } + return e.write(v) +} + +func (e *Encoder) EncodeArrayLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedArrayLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Array16, uint16(l)) + } + return e.write4(codes.Array32, uint32(l)) +} + +func (e *Encoder) encodeStringSlice(s []string) error { + if s == nil { + return e.EncodeNil() + } + if err := e.EncodeArrayLen(len(s)); err != nil { + return err + } + for _, v := range s { + if err := e.EncodeString(v); err != nil { + return err + } + } + return nil +} + +func encodeSliceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encodeArrayValue(e, v) +} + +func encodeArrayValue(e *Encoder, v reflect.Value) error { + l := v.Len() + if err := e.EncodeArrayLen(l); err != nil { + return err + } + for i := 0; i < l; i++ { + if err := e.EncodeValue(v.Index(i)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_value.go b/vendor/github.com/vmihailenco/msgpack/encode_value.go new file mode 100644 index 0000000000..b46ab02a18 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_value.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "fmt" + "reflect" +) + +var valueEncoders []encoderFunc + +func init() { + valueEncoders = []encoderFunc{ + reflect.Bool: encodeBoolValue, + reflect.Int: encodeInt64CondValue, + reflect.Int8: encodeInt8CondValue, + reflect.Int16: encodeInt16CondValue, + reflect.Int32: encodeInt32CondValue, + reflect.Int64: encodeInt64CondValue, + reflect.Uint: encodeUint64CondValue, + reflect.Uint8: encodeUint8CondValue, + reflect.Uint16: encodeUint16CondValue, + reflect.Uint32: encodeUint32CondValue, + reflect.Uint64: encodeUint64CondValue, + reflect.Float32: encodeFloat32Value, + reflect.Float64: encodeFloat64Value, + reflect.Complex64: encodeUnsupportedValue, + reflect.Complex128: encodeUnsupportedValue, + reflect.Array: encodeArrayValue, + reflect.Chan: encodeUnsupportedValue, + reflect.Func: encodeUnsupportedValue, + reflect.Interface: encodeInterfaceValue, + reflect.Map: encodeMapValue, + reflect.Ptr: encodeUnsupportedValue, + reflect.Slice: encodeSliceValue, + reflect.String: encodeStringValue, + reflect.Struct: encodeStructValue, + reflect.UnsafePointer: encodeUnsupportedValue, + } +} + +func getEncoder(typ reflect.Type) encoderFunc { + if encoder, ok := typEncMap[typ]; ok { + return encoder + } + + if typ.Implements(customEncoderType) { + return encodeCustomValue + } + if typ.Implements(marshalerType) { + return marshalValue + } + + kind := typ.Kind() + + // Addressable struct field value. 
+ if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customEncoderType) { + return encodeCustomValuePtr + } + if ptr.Implements(marshalerType) { + return marshalValuePtr + } + } + + if typ == errorType { + return encodeErrorValue + } + + switch kind { + case reflect.Ptr: + return ptrEncoderFunc(typ) + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return encodeMapStringStringValue + case interfaceType: + return encodeMapStringInterfaceValue + } + } + } + return valueEncoders[kind] +} + +func ptrEncoderFunc(typ reflect.Type) encoderFunc { + encoder := getEncoder(typ.Elem()) + return func(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encoder(e, v.Elem()) + } +} + +func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + encoder := v.Addr().Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func encodeCustomValue(e *Encoder, v reflect.Value) error { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return e.EncodeNil() + } + } + + encoder := v.Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func marshalValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalValue(e, v.Addr()) +} + +func marshalValue(e *Encoder, v reflect.Value) error { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return e.EncodeNil() + } + } + + marshaler := v.Interface().(Marshaler) + b, err := marshaler.MarshalMsgpack() + if err != nil { + return err + } + _, err = e.w.Write(b) + return err +} + +func encodeBoolValue(e *Encoder, v reflect.Value) error { + return e.EncodeBool(v.Bool()) +} + +func encodeInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeValue(v.Elem()) +} + +func encodeErrorValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeString(v.Interface().(error).Error()) +} + +func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/ext.go b/vendor/github.com/vmihailenco/msgpack/ext.go new file mode 100644 index 0000000000..7970be8522 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/ext.go @@ -0,0 +1,238 @@ +package msgpack + +import ( + "bytes" + "fmt" + "reflect" + "sync" + + "github.com/vmihailenco/msgpack/codes" +) + +var extTypes = make(map[int8]reflect.Type) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// RegisterExt records a type, identified by a value for that type, +// under the provided id. That id will identify the concrete type of a value +// sent or received as an interface variable. Only types that will be +// transferred as implementations of interface values need to be registered. 
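+//
+// A hypothetical registration (Point is a stand-in type, not part of this
+// package):
+//
+//	type Point struct{ X, Y int32 }
+//
+//	func init() {
+//		msgpack.RegisterExt(1, Point{})
+//	}
+//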
+// Expecting to be used only during initialization, it panics if the mapping
+// between types and ids is not a bijection.
+func RegisterExt(id int8, value interface{}) {
+	typ := reflect.TypeOf(value)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+	}
+	ptr := reflect.PtrTo(typ)
+
+	if _, ok := extTypes[id]; ok {
+		panic(fmt.Errorf("msgpack: ext with id=%d is already registered", id))
+	}
+
+	registerExt(id, ptr, getEncoder(ptr), getDecoder(ptr))
+	registerExt(id, typ, getEncoder(typ), getDecoder(typ))
+}
+
+func registerExt(id int8, typ reflect.Type, enc encoderFunc, dec decoderFunc) {
+	if dec != nil {
+		extTypes[id] = typ
+	}
+	if enc != nil {
+		typEncMap[typ] = makeExtEncoder(id, enc)
+	}
+	if dec != nil {
+		typDecMap[typ] = makeExtDecoder(id, dec)
+	}
+}
+
+func (e *Encoder) EncodeExtHeader(typeId int8, length int) error {
+	if err := e.encodeExtLen(length); err != nil {
+		return err
+	}
+	if err := e.w.WriteByte(byte(typeId)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func makeExtEncoder(typeId int8, enc encoderFunc) encoderFunc {
+	return func(e *Encoder, v reflect.Value) error {
+		buf := bufferPool.Get().(*bytes.Buffer)
+		defer bufferPool.Put(buf)
+		buf.Reset()
+
+		oldw := e.w
+		e.w = buf
+		err := enc(e, v)
+		e.w = oldw
+
+		if err != nil {
+			return err
+		}
+
+		err = e.EncodeExtHeader(typeId, buf.Len())
+		if err != nil {
+			return err
+		}
+		return e.write(buf.Bytes())
+	}
+}
+
+func makeExtDecoder(typeId int8, dec decoderFunc) decoderFunc {
+	return func(d *Decoder, v reflect.Value) error {
+		c, err := d.PeekCode()
+		if err != nil {
+			return err
+		}
+
+		if !codes.IsExt(c) {
+			return dec(d, v)
+		}
+
+		id, extLen, err := d.DecodeExtHeader()
+		if err != nil {
+			return err
+		}
+
+		if id != typeId {
+			return fmt.Errorf("msgpack: got ext type=%d, wanted %d", id, typeId)
+		}
+
+		d.extLen = extLen
+		return dec(d, v)
+	}
+}
+
+func (e *Encoder) encodeExtLen(l int) error {
+	switch l {
+	case 1:
+		return e.writeCode(codes.FixExt1)
+	case 2:
+		return e.writeCode(codes.FixExt2)
+	case 4:
+		return e.writeCode(codes.FixExt4)
+	case 8:
+		return e.writeCode(codes.FixExt8)
+	case 16:
+		return e.writeCode(codes.FixExt16)
+	}
+	if l < 256 {
+		return e.write1(codes.Ext8, uint8(l))
+	}
+	if l < 65536 {
+		return e.write2(codes.Ext16, uint16(l))
+	}
+	return e.write4(codes.Ext32, uint32(l))
+}
+
+func (d *Decoder) parseExtLen(c codes.Code) (int, error) {
+	switch c {
+	case codes.FixExt1:
+		return 1, nil
+	case codes.FixExt2:
+		return 2, nil
+	case codes.FixExt4:
+		return 4, nil
+	case codes.FixExt8:
+		return 8, nil
+	case codes.FixExt16:
+		return 16, nil
+	case codes.Ext8:
+		n, err := d.uint8()
+		return int(n), err
+	case codes.Ext16:
+		n, err := d.uint16()
+		return int(n), err
+	case codes.Ext32:
+		n, err := d.uint32()
+		return int(n), err
+	default:
+		return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext length", c)
+	}
+}
+
+func (d *Decoder) decodeExtHeader(c codes.Code) (int8, int, error) {
+	length, err := d.parseExtLen(c)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	typeId, err := d.readCode()
+	if err != nil {
+		return 0, 0, err
+	}
+
+	return int8(typeId), length, nil
+}
+
+func (d *Decoder) DecodeExtHeader() (typeId int8, length int, err error) {
+	c, err := d.readCode()
+	if err != nil {
+		return
+	}
+	return d.decodeExtHeader(c)
+}
+
+func (d *Decoder) extInterface(c codes.Code) (interface{}, error) {
+	extId, extLen, err := d.decodeExtHeader(c)
+	if err != nil {
+		return nil, err
+	}
+
+	typ, ok := extTypes[extId]
+	if !ok {
+		return nil, fmt.Errorf("msgpack: 
unregistered ext id=%d", extId) + } + + v := reflect.New(typ) + + d.extLen = extLen + err = d.DecodeValue(v.Elem()) + d.extLen = 0 + if err != nil { + return nil, err + } + + return v.Interface(), nil +} + +func (d *Decoder) skipExt(c codes.Code) error { + n, err := d.parseExtLen(c) + if err != nil { + return err + } + return d.skipN(n + 1) +} + +func (d *Decoder) skipExtHeader(c codes.Code) error { + // Read ext type. + _, err := d.readCode() + if err != nil { + return err + } + // Read ext body len. + for i := 0; i < extHeaderLen(c); i++ { + _, err := d.readCode() + if err != nil { + return err + } + } + return nil +} + +func extHeaderLen(c codes.Code) int { + switch c { + case codes.Ext8: + return 1 + case codes.Ext16: + return 2 + case codes.Ext32: + return 4 + } + return 0 +} diff --git a/vendor/github.com/vmihailenco/msgpack/msgpack.go b/vendor/github.com/vmihailenco/msgpack/msgpack.go new file mode 100644 index 0000000000..220b43c47b --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/msgpack.go @@ -0,0 +1,17 @@ +package msgpack + +type Marshaler interface { + MarshalMsgpack() ([]byte, error) +} + +type Unmarshaler interface { + UnmarshalMsgpack([]byte) error +} + +type CustomEncoder interface { + EncodeMsgpack(*Encoder) error +} + +type CustomDecoder interface { + DecodeMsgpack(*Decoder) error +} diff --git a/vendor/github.com/vmihailenco/msgpack/tag.go b/vendor/github.com/vmihailenco/msgpack/tag.go new file mode 100644 index 0000000000..48e6f942c0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/tag.go @@ -0,0 +1,42 @@ +package msgpack + +import ( + "strings" +) + +type tagOptions string + +func (o tagOptions) Get(name string) (string, bool) { + s := string(o) + for len(s) > 0 { + var next string + idx := strings.IndexByte(s, ',') + if idx >= 0 { + s, next = s[:idx], s[idx+1:] + } + if strings.HasPrefix(s, name) { + return s[len(name):], true + } + s = next + } + return "", false +} + +func (o tagOptions) Contains(name string) bool { + _, ok := o.Get(name) + return ok +} + +func parseTag(tag string) (string, tagOptions) { + if idx := strings.IndexByte(tag, ','); idx != -1 { + name := tag[:idx] + if strings.IndexByte(name, ':') == -1 { + return name, tagOptions(tag[idx+1:]) + } + } + + if strings.IndexByte(tag, ':') == -1 { + return tag, "" + } + return "", tagOptions(tag) +} diff --git a/vendor/github.com/vmihailenco/msgpack/time.go b/vendor/github.com/vmihailenco/msgpack/time.go new file mode 100644 index 0000000000..f7409690a1 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/time.go @@ -0,0 +1,145 @@ +package msgpack + +import ( + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +var timeExtId int8 = -1 + +func init() { + timeType := reflect.TypeOf((*time.Time)(nil)).Elem() + registerExt(timeExtId, timeType, encodeTimeValue, decodeTimeValue) +} + +func (e *Encoder) EncodeTime(tm time.Time) error { + b := e.encodeTime(tm) + if err := e.encodeExtLen(len(b)); err != nil { + return err + } + if err := e.w.WriteByte(byte(timeExtId)); err != nil { + return err + } + return e.write(b) +} + +func (e *Encoder) encodeTime(tm time.Time) []byte { + secs := uint64(tm.Unix()) + if secs>>34 == 0 { + data := uint64(tm.Nanosecond())<<34 | secs + if data&0xffffffff00000000 == 0 { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(data)) + return b + } else { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, data) + return b + } + } + + b := make([]byte, 12) + binary.BigEndian.PutUint32(b, 
uint32(tm.Nanosecond()))
+	binary.BigEndian.PutUint64(b[4:], secs)
+	return b
+}
+
+func (d *Decoder) DecodeTime() (time.Time, error) {
+	tm, err := d.decodeTime()
+	if err != nil {
+		return tm, err
+	}
+
+	if tm.IsZero() {
+		// Assume that zero time does not have timezone information.
+		return tm.UTC(), nil
+	}
+	return tm, nil
+}
+
+func (d *Decoder) decodeTime() (time.Time, error) {
+	extLen := d.extLen
+	d.extLen = 0
+	if extLen == 0 {
+		c, err := d.readCode()
+		if err != nil {
+			return time.Time{}, err
+		}
+
+		// Legacy format.
+		if c == codes.FixedArrayLow|2 {
+			sec, err := d.DecodeInt64()
+			if err != nil {
+				return time.Time{}, err
+			}
+
+			nsec, err := d.DecodeInt64()
+			if err != nil {
+				return time.Time{}, err
+			}
+
+			return time.Unix(sec, nsec), nil
+		}
+
+		if codes.IsString(c) {
+			s, err := d.string(c)
+			if err != nil {
+				return time.Time{}, err
+			}
+			return time.Parse(time.RFC3339Nano, s)
+		}
+
+		extLen, err = d.parseExtLen(c)
+		if err != nil {
+			return time.Time{}, err
+		}
+
+		// Skip ext id.
+		_, err = d.s.ReadByte()
+		if err != nil {
+			return time.Time{}, err
+		}
+	}
+
+	b, err := d.readN(extLen)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	switch len(b) {
+	case 4:
+		sec := binary.BigEndian.Uint32(b)
+		return time.Unix(int64(sec), 0), nil
+	case 8:
+		sec := binary.BigEndian.Uint64(b)
+		nsec := int64(sec >> 34)
+		sec &= 0x00000003ffffffff
+		return time.Unix(int64(sec), nsec), nil
+	case 12:
+		nsec := binary.BigEndian.Uint32(b)
+		sec := binary.BigEndian.Uint64(b[4:])
+		return time.Unix(int64(sec), int64(nsec)), nil
+	default:
+		err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen)
+		return time.Time{}, err
+	}
+}
+
+func encodeTimeValue(e *Encoder, v reflect.Value) error {
+	tm := v.Interface().(time.Time)
+	b := e.encodeTime(tm)
+	return e.write(b)
+}
+
+func decodeTimeValue(d *Decoder, v reflect.Value) error {
+	tm, err := d.DecodeTime()
+	if err != nil {
+		return err
+	}
+	v.Set(reflect.ValueOf(tm))
+	return nil
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/types.go b/vendor/github.com/vmihailenco/msgpack/types.go
new file mode 100644
index 0000000000..6a1bf7f913
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/types.go
@@ -0,0 +1,310 @@
+package msgpack
+
+import (
+	"reflect"
+	"sync"
+)
+
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+var customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem()
+var customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem()
+
+var marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+
+type encoderFunc func(*Encoder, reflect.Value) error
+type decoderFunc func(*Decoder, reflect.Value) error
+
+var typEncMap = make(map[reflect.Type]encoderFunc)
+var typDecMap = make(map[reflect.Type]decoderFunc)
+
+// Register registers encoder and decoder functions for a value.
+// This is a low-level API and in most cases you should prefer implementing
+// the Marshaler/CustomEncoder and Unmarshaler/CustomDecoder interfaces.
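+//
+// A minimal sketch (time.Duration here is arbitrary and the function
+// literals are placeholders):
+//
+//	msgpack.Register(time.Duration(0),
+//		func(e *msgpack.Encoder, v reflect.Value) error {
+//			return e.EncodeInt64(v.Int())
+//		},
+//		func(d *msgpack.Decoder, v reflect.Value) error {
+//			n, err := d.DecodeInt64()
+//			if err != nil {
+//				return err
+//			}
+//			v.SetInt(n)
+//			return nil
+//		})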
+func Register(value interface{}, enc encoderFunc, dec decoderFunc) { + typ := reflect.TypeOf(value) + if enc != nil { + typEncMap[typ] = enc + } + if dec != nil { + typDecMap[typ] = dec + } +} + +//------------------------------------------------------------------------------ + +var structs = newStructCache(false) +var jsonStructs = newStructCache(true) + +type structCache struct { + mu sync.RWMutex + m map[reflect.Type]*fields + + useJSONTag bool +} + +func newStructCache(useJSONTag bool) *structCache { + return &structCache{ + m: make(map[reflect.Type]*fields), + + useJSONTag: useJSONTag, + } +} + +func (m *structCache) Fields(typ reflect.Type) *fields { + m.mu.RLock() + fs, ok := m.m[typ] + m.mu.RUnlock() + if ok { + return fs + } + + m.mu.Lock() + fs, ok = m.m[typ] + if !ok { + fs = getFields(typ, m.useJSONTag) + m.m[typ] = fs + } + m.mu.Unlock() + + return fs +} + +//------------------------------------------------------------------------------ + +type field struct { + name string + index []int + omitEmpty bool + encoder encoderFunc + decoder decoderFunc +} + +func (f *field) value(v reflect.Value) reflect.Value { + return fieldByIndex(v, f.index) +} + +func (f *field) Omit(strct reflect.Value) bool { + return f.omitEmpty && isEmptyValue(f.value(strct)) +} + +func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { + return f.encoder(e, f.value(strct)) +} + +func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { + return f.decoder(d, f.value(strct)) +} + +//------------------------------------------------------------------------------ + +type fields struct { + Table map[string]*field + List []*field + AsArray bool + + hasOmitEmpty bool +} + +func newFields(numField int) *fields { + return &fields{ + Table: make(map[string]*field, numField), + List: make([]*field, 0, numField), + } +} + +func (fs *fields) Add(field *field) { + fs.Table[field.name] = field + fs.List = append(fs.List, field) + if field.omitEmpty { + fs.hasOmitEmpty = true + } +} + +func (fs *fields) OmitEmpty(strct reflect.Value) []*field { + if !fs.hasOmitEmpty { + return fs.List + } + + fields := make([]*field, 0, len(fs.List)) + for _, f := range fs.List { + if !f.Omit(strct) { + fields = append(fields, f) + } + } + return fields +} + +func getFields(typ reflect.Type, useJSONTag bool) *fields { + numField := typ.NumField() + fs := newFields(numField) + + var omitEmpty bool + for i := 0; i < numField; i++ { + f := typ.Field(i) + + tag := f.Tag.Get("msgpack") + if useJSONTag && tag == "" { + tag = f.Tag.Get("json") + } + + name, opt := parseTag(tag) + if name == "-" { + continue + } + + if f.Name == "_msgpack" { + if opt.Contains("asArray") { + fs.AsArray = true + } + if opt.Contains("omitempty") { + omitEmpty = true + } + } + + if f.PkgPath != "" && !f.Anonymous { + continue + } + + field := &field{ + name: name, + index: f.Index, + omitEmpty: omitEmpty || opt.Contains("omitempty"), + encoder: getEncoder(f.Type), + decoder: getDecoder(f.Type), + } + + if field.name == "" { + field.name = f.Name + } + + if f.Anonymous && !opt.Contains("noinline") { + inline := opt.Contains("inline") + if inline { + inlineFields(fs, f.Type, field, useJSONTag) + } else { + inline = autoinlineFields(fs, f.Type, field, useJSONTag) + } + if inline { + fs.Table[field.name] = field + continue + } + } + + fs.Add(field) + } + return fs +} + +var encodeStructValuePtr uintptr +var decodeStructValuePtr uintptr + +func init() { + encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() + decodeStructValuePtr = 
reflect.ValueOf(decodeStructValue).Pointer() +} + +func inlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) { + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Table[field.name]; ok { + // Don't inline shadowed fields. + continue + } + field.index = append(f.index, field.index...) + fs.Add(field) + } +} + +func autoinlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool { + var encoder encoderFunc + var decoder decoderFunc + + if typ.Kind() == reflect.Struct { + encoder = f.encoder + decoder = f.decoder + } else { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + encoder = getEncoder(typ) + decoder = getDecoder(typ) + } + if typ.Kind() != reflect.Struct { + return false + } + } + + if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { + return false + } + if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { + return false + } + + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Table[field.name]; ok { + // Don't auto inline if there are shadowed fields. + return false + } + } + + for _, field := range inlinedFields { + field.index = append(f.index, field.index...) + fs.Add(field) + } + return true +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func fieldByIndex(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + for i, x := range index { + if i > 0 { + var ok bool + v, ok = indirectNew(v) + if !ok { + return v + } + } + v = v.Field(x) + } + return v +} + +func indirectNew(v reflect.Value) (reflect.Value, bool) { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + return v, false + } + elemType := v.Type().Elem() + if elemType.Kind() != reflect.Struct { + return v, false + } + v.Set(reflect.New(elemType)) + } + v = v.Elem() + } + return v, true +} diff --git a/vendor/github.com/zclconf/go-cty/cty/capsule.go b/vendor/github.com/zclconf/go-cty/cty/capsule.go index 4fce92add1..d273d14833 100644 --- a/vendor/github.com/zclconf/go-cty/cty/capsule.go +++ b/vendor/github.com/zclconf/go-cty/cty/capsule.go @@ -19,7 +19,7 @@ func (t *capsuleType) Equals(other Type) bool { return false } -func (t *capsuleType) FriendlyName() string { +func (t *capsuleType) FriendlyName(mode friendlyTypeNameMode) string { return t.Name } diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go index 7bfcc081cc..f9aacb4ee7 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go @@ -17,6 +17,10 @@ func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { // Wrap the conversion in some standard checks that we don't want to // have to repeat in every conversion function. 
return func(in cty.Value, path cty.Path) (cty.Value, error) { + if out == cty.DynamicPseudoType { + // Conversion to DynamicPseudoType always just passes through verbatim. + return in, nil + } if !in.IsKnown() { return cty.UnknownVal(out), nil } @@ -57,6 +61,12 @@ func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { } return nil + case out.IsObjectType() && in.IsObjectType(): + return conversionObjectToObject(in, out, unsafe) + + case out.IsTupleType() && in.IsTupleType(): + return conversionTupleToTuple(in, out, unsafe) + case out.IsListType() && (in.IsListType() || in.IsSetType()): inEty := in.ElementType() outEty := out.ElementType() @@ -93,10 +103,23 @@ func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { } return conversionCollectionToSet(outEty, convEty) + case out.IsMapType() && in.IsMapType(): + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToMap(outEty, convEty) + case out.IsListType() && in.IsTupleType(): outEty := out.ElementType() return conversionTupleToList(in, outEty, unsafe) + case out.IsSetType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToSet(in, outEty, unsafe) + case out.IsMapType() && in.IsObjectType(): outEty := out.ElementType() return conversionObjectToMap(in, outEty, unsafe) diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go index eace85d3dd..c2ac14ecfb 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go @@ -84,6 +84,120 @@ func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { } } +// conversionCollectionToMap returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a map. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a map. +func conversionCollectionToMap(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, 0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + keyStr, err := Convert(key, cty.String) + if err != nil { + // Should never happen, because keys can only be numbers or + // strings and both can convert to string. + return cty.DynamicVal, path.NewErrorf("cannot convert key type %s to string for map", key.Type().FriendlyName()) + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + elems[keyStr.AsString()] = val + } + + if len(elems) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(elems), nil + } +} + +// conversionTupleToSet returns a conversion that will take a value of the +// given tuple type and return a set of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. 
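+//
+// For example, tuple([string, number]) can convert to set(string), because
+// number converts safely to string; converting it to set(number) instead
+// requires unsafe mode, since string parses to number only as an unsafe
+// conversion.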
+func conversionTupleToSet(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion {
+	tupleEtys := tupleType.TupleElementTypes()
+
+	if len(tupleEtys) == 0 {
+		// Empty tuple short-circuit
+		return func(val cty.Value, path cty.Path) (cty.Value, error) {
+			return cty.SetValEmpty(listEty), nil
+		}
+	}
+
+	if listEty == cty.DynamicPseudoType {
+		// This is a special case where the caller wants us to find
+		// a suitable single type that all elements can convert to, if
+		// possible.
+		listEty, _ = unify(tupleEtys, unsafe)
+		if listEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make([]conversion, len(tupleEtys))
+	for i, tupleEty := range tupleEtys {
+		if tupleEty.Equals(listEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
+		if elemConvs[i] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make([]cty.Value, 0, len(elemConvs))
+		path = append(path, nil)
+		i := int64(0)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, val := it.Element()
+			var err error
+
+			path[len(path)-1] = cty.IndexStep{
+				Key: cty.NumberIntVal(i),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems = append(elems, val)
+
+			i++
+		}
+
+		return cty.SetVal(elems), nil
+	}
+}
+
 // conversionTupleToList returns a conversion that will take a value of the
 // given tuple type and return a list of the given element type.
 //
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go
new file mode 100644
index 0000000000..62dabb8d1b
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go
@@ -0,0 +1,76 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// conversionObjectToObject returns a conversion that will make the input
+// object type conform to the output object type, if possible.
+//
+// Conversion is possible only if the output type is a subset of the input
+// type, meaning that each attribute of the output type has a corresponding
+// attribute in the input type where a recursive conversion is available.
+//
+// Shallow object conversions work the same for both safe and unsafe modes,
+// but the safety flag is passed on to recursive conversions and may thus
+// limit the above definition of "subset".
+func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion {
+	inAtys := in.AttributeTypes()
+	outAtys := out.AttributeTypes()
+	attrConvs := make(map[string]conversion)
+
+	for name, outAty := range outAtys {
+		inAty, exists := inAtys[name]
+		if !exists {
+			// No conversion is available, then.
+			return nil
+		}
+
+		if inAty.Equals(outAty) {
+			// No conversion needed, but we'll still record the attribute
+			// in our map for later reference.
+			attrConvs[name] = nil
+			continue
+		}
+
+		attrConvs[name] = getConversion(inAty, outAty, unsafe)
+		if attrConvs[name] == nil {
+			// If a recursive conversion isn't available, then our top-level
+			// configuration is impossible too.
+			return nil
+		}
+	}
+
+	// If we get here then a conversion is possible, using the attribute
+	// conversions given in attrConvs.
+ return func(val cty.Value, path cty.Path) (cty.Value, error) { + attrVals := make(map[string]cty.Value, len(attrConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + for it := val.ElementIterator(); it.Next(); { + nameVal, val := it.Element() + var err error + + name := nameVal.AsString() + *pathStep = cty.GetAttrStep{ + Name: name, + } + + conv, exists := attrConvs[name] + if !exists { + continue + } + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + attrVals[name] = val + } + + return cty.ObjectVal(attrVals), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go index e563ee3483..e0dbf491e0 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go @@ -1,8 +1,6 @@ package convert import ( - "math/big" - "github.com/zclconf/go-cty/cty" ) @@ -30,11 +28,11 @@ var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ cty.String: { cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { - f, _, err := big.ParseFloat(val.AsString(), 10, 512, big.ToNearestEven) + v, err := cty.ParseNumberVal(val.AsString()) if err != nil { return cty.NilVal, path.NewErrorf("a number is required") } - return cty.NumberVal(f), nil + return v, nil }, cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { switch val.AsString() { diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go new file mode 100644 index 0000000000..592980a701 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go @@ -0,0 +1,71 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversionTupleToTuple returns a conversion that will make the input +// tuple type conform to the output tuple type, if possible. +// +// Conversion is possible only if the two tuple types have the same number +// of elements and the corresponding elements by index can be converted. +// +// Shallow tuple conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit which element type conversions are possible. +func conversionTupleToTuple(in, out cty.Type, unsafe bool) conversion { + inEtys := in.TupleElementTypes() + outEtys := out.TupleElementTypes() + + if len(inEtys) != len(outEtys) { + return nil // no conversion is possible + } + + elemConvs := make([]conversion, len(inEtys)) + + for i, outEty := range outEtys { + inEty := inEtys[i] + + if inEty.Equals(outEty) { + // No conversion needed, so we can leave this one nil. + continue + } + + elemConvs[i] = getConversion(inEty, outEty, unsafe) + if elemConvs[i] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. + return nil + } + } + + // If we get here then a conversion is possible, using the element + // conversions given in elemConvs. 
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elemVals := make([]cty.Value, len(elemConvs))
+		path = append(path, nil)
+		pathStep := &path[len(path)-1]
+
+		i := 0
+		for it := val.ElementIterator(); it.Next(); i++ {
+			_, val := it.Element()
+			var err error
+
+			*pathStep = cty.IndexStep{
+				Key: cty.NumberIntVal(int64(i)),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+
+			elemVals[i] = val
+		}
+
+		return cty.TupleVal(elemVals), nil
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go b/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go
new file mode 100644
index 0000000000..581304ecd5
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go
@@ -0,0 +1,220 @@
+package convert
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// MismatchMessage is a helper to return an English-language description of
+// the differences between got and want, phrased as a reason why got does
+// not conform to want.
+//
+// This function does not itself attempt conversion, and so it should generally
+// be used only after a conversion has failed, to report the conversion failure
+// to an English-speaking user. The result will be confusing if got is actually
+// conforming to or convertible to want.
+//
+// The shorthand helper function Convert uses this function internally to
+// produce its error messages, so callers of that function do not need to
+// also use MismatchMessage.
+//
+// This function is similar to Type.TestConformance, but it is tailored to
+// describing conversion failures and so the messages it generates relate
+// specifically to the conversion rules implemented in this package.
+func MismatchMessage(got, want cty.Type) string {
+	switch {
+
+	case got.IsObjectType() && want.IsObjectType():
+		// If both types are object types then we may be able to say something
+		// about their respective attributes.
+		return mismatchMessageObjects(got, want)
+
+	case got.IsTupleType() && want.IsListType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from tuple to list failed then it's because we couldn't
+		// find a common type to convert all of the tuple elements to.
+		return "all list elements must have the same type"
+
+	case got.IsTupleType() && want.IsSetType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from tuple to set failed then it's because we couldn't
+		// find a common type to convert all of the tuple elements to.
+		return "all set elements must have the same type"
+
+	case got.IsObjectType() && want.IsMapType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from object to map failed then it's because we couldn't
+		// find a common type to convert all of the object attributes to.
+		return "all map elements must have the same type"
+
+	case (got.IsTupleType() || got.IsObjectType()) && want.IsCollectionType():
+		return mismatchMessageCollectionsFromStructural(got, want)
+
+	case got.IsCollectionType() && want.IsCollectionType():
+		return mismatchMessageCollectionsFromCollections(got, want)
+
+	default:
+		// If we have nothing better to say, we'll just state what was required.
+		return want.FriendlyNameForConstraint() + " required"
+	}
+}
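+
+// Illustrative results (derived from the rules above, not from upstream docs):
+//
+//	MismatchMessage(cty.EmptyObject, cty.Object(map[string]cty.Type{"name": cty.String}))
+//	// returns: attribute "name" is required
+//
+//	MismatchMessage(cty.String, cty.Number)
+//	// returns: number required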
+
+func mismatchMessageObjects(got, want cty.Type) string {
+	// Per our conversion rules, "got" is allowed to be a superset of "want",
+	// and so we'll produce error messages here under that assumption.
+	gotAtys := got.AttributeTypes()
+	wantAtys := want.AttributeTypes()
+
+	// If we find missing attributes then we'll report those in preference,
+	// but if not then we will report a maximum of one non-conforming
+	// attribute, just to keep our messages relatively terse.
+	// We'll also prefer to report a recursive type error from an _unsafe_
+	// conversion over a safe one, because these are subjectively more
+	// "serious".
+	var missingAttrs []string
+	var unsafeMismatchAttr string
+	var safeMismatchAttr string
+
+	for name, wantAty := range wantAtys {
+		gotAty, exists := gotAtys[name]
+		if !exists {
+			missingAttrs = append(missingAttrs, name)
+			continue
+		}
+
+		// We'll now try to convert these attributes in isolation and
+		// see if we have a nested conversion error to report.
+		// We'll try an unsafe conversion first, and then fall back on
+		// safe if an unsafe conversion is not possible.
+
+		// If we already have an unsafe mismatch attr error then we won't bother
+		// hunting for another one.
+		if unsafeMismatchAttr != "" {
+			continue
+		}
+		if conv := GetConversionUnsafe(gotAty, wantAty); conv == nil {
+			unsafeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty))
+		}
+
+		// If we already have a safe mismatch attr error then we won't bother
+		// hunting for another one.
+		if safeMismatchAttr != "" {
+			continue
+		}
+		if conv := GetConversion(gotAty, wantAty); conv == nil {
+			safeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty))
+		}
+	}
+
+	// We should now have collected at least one problem. If we have more than
+	// one then we'll use our preference order to decide what is most important
+	// to report.
+	switch {
+
+	case len(missingAttrs) != 0:
+		sort.Strings(missingAttrs)
+		switch len(missingAttrs) {
+		case 1:
+			return fmt.Sprintf("attribute %q is required", missingAttrs[0])
+		case 2:
+			return fmt.Sprintf("attributes %q and %q are required", missingAttrs[0], missingAttrs[1])
+		default:
+			var buf bytes.Buffer
+			for _, name := range missingAttrs[:len(missingAttrs)-1] {
+				fmt.Fprintf(&buf, "%q, ", name)
+			}
+			fmt.Fprintf(&buf, "and %q", missingAttrs[len(missingAttrs)-1])
+			return fmt.Sprintf("attributes %s are required", buf.Bytes())
+		}
+
+	case unsafeMismatchAttr != "":
+		return unsafeMismatchAttr
+
+	case safeMismatchAttr != "":
+		return safeMismatchAttr
+
+	default:
+		// We should never get here, but if we do then we'll return
+		// just a generic message.
+		return "incorrect object attributes"
+	}
+}
+
+func mismatchMessageCollectionsFromStructural(got, want cty.Type) string {
+	// First some straightforward cases where the kind is just altogether wrong.
+	switch {
+	case want.IsListType() && !got.IsTupleType():
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsSetType() && !got.IsTupleType():
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsMapType() && !got.IsObjectType():
+		return want.FriendlyNameForConstraint() + " required"
+	}
+
+	// If the kinds are matched well enough then we'll move on to checking
+	// individual elements.
+ wantEty := want.ElementType() + switch { + case got.IsTupleType(): + for i, gotEty := range got.TupleElementTypes() { + if gotEty.Equals(wantEty) { + continue // exact match, so no problem + } + if conv := getConversion(gotEty, wantEty, true); conv != nil { + continue // conversion is available, so no problem + } + return fmt.Sprintf("element %d: %s", i, MismatchMessage(gotEty, wantEty)) + } + + // If we get down here then something weird is going on but we'll + // return a reasonable fallback message anyway. + return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint()) + + case got.IsObjectType(): + for name, gotAty := range got.AttributeTypes() { + if gotAty.Equals(wantEty) { + continue // exact match, so no problem + } + if conv := getConversion(gotAty, wantEty, true); conv != nil { + continue // conversion is available, so no problem + } + return fmt.Sprintf("element %q: %s", name, MismatchMessage(gotAty, wantEty)) + } + + // If we get down here then something weird is going on but we'll + // return a reasonable fallback message anyway. + return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint()) + + default: + // Should not be possible to get here since we only call this function + // with got as structural types, but... + return want.FriendlyNameForConstraint() + " required" + } +} + +func mismatchMessageCollectionsFromCollections(got, want cty.Type) string { + // First some straightforward cases where the kind is just altogether wrong. + switch { + case want.IsListType() && !(got.IsListType() || got.IsSetType()): + return want.FriendlyNameForConstraint() + " required" + case want.IsSetType() && !(got.IsListType() || got.IsSetType()): + return want.FriendlyNameForConstraint() + " required" + case want.IsMapType() && !got.IsMapType(): + return want.FriendlyNameForConstraint() + " required" + } + + // If the kinds are matched well enough then we'll check the element types. 
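+	// For example, want list(number) with got list(string) produces
+	// "incorrect list element type: number required".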
+ gotEty := got.ElementType() + wantEty := want.ElementType() + noun := "element type" + switch { + case want.IsListType(): + noun = "list element type" + case want.IsSetType(): + noun = "set element type" + case want.IsMapType(): + noun = "map element type" + } + return fmt.Sprintf("incorrect %s: %s", noun, MismatchMessage(gotEty, wantEty)) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/public.go b/vendor/github.com/zclconf/go-cty/cty/convert/public.go index 55f44aeca0..af19bdc501 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/public.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/public.go @@ -1,7 +1,7 @@ package convert import ( - "fmt" + "errors" "github.com/zclconf/go-cty/cty" ) @@ -46,7 +46,7 @@ func Convert(in cty.Value, want cty.Type) (cty.Value, error) { conv := GetConversionUnsafe(in.Type(), want) if conv == nil { - return cty.NilVal, fmt.Errorf("incorrect type; %s required", want.FriendlyName()) + return cty.NilVal, errors.New(MismatchMessage(in.Type(), want)) } return conv(in) } diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/unify.go b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go index bd6736b4dc..53ebbfe08a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/unify.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go @@ -21,6 +21,39 @@ func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { return cty.NilType, nil } + // If all of the given types are of the same structural kind, we may be + // able to construct a new type that they can all be unified to, even if + // that is not one of the given types. We must try this before the general + // behavior below because in unsafe mode we can convert an object type to + // a subset of that type, which would be a much less useful conversion for + // unification purposes. + { + objectCt := 0 + tupleCt := 0 + dynamicCt := 0 + for _, ty := range types { + switch { + case ty.IsObjectType(): + objectCt++ + case ty.IsTupleType(): + tupleCt++ + case ty == cty.DynamicPseudoType: + dynamicCt++ + default: + break + } + } + switch { + case objectCt > 0 && (objectCt+dynamicCt) == len(types): + return unifyObjectTypes(types, unsafe, dynamicCt > 0) + case tupleCt > 0 && (tupleCt+dynamicCt) == len(types): + return unifyTupleTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && tupleCt > 0: + // Can never unify object and tuple types since they have incompatible kinds + return cty.NilType, nil + } + } + prefOrder := sortTypes(types) // sortTypes gives us an order where earlier items are preferable as @@ -58,9 +91,224 @@ Preferences: return wantType, conversions } - // TODO: For structural types, try to invent a new type that they - // can all be unified to, by unifying their respective attributes. - // If we fall out here, no unification is possible return cty.NilType, nil } + +func unifyObjectTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) { + // If we had any dynamic types in the input here then we can't predict + // what path we'll take through here once these become known types, so + // we'll conservatively produce DynamicVal for these. + if hasDynamic { + return unifyAllAsDynamic(types) + } + + // There are two different ways we can succeed here: + // - If all of the given object types have the same set of attribute names + // and the corresponding types are all unifyable, then we construct that + // type. 
+	// - If the given object types have different attribute names or their
+	//   corresponding types are not unifyable, we'll instead try to unify
+	//   all of the attribute types together to produce a map type.
+	//
+	// Our unification behavior is intentionally stricter than our conversion
+	// behavior for subset object types because user intent is different with
+	// unification use-cases: it makes sense to allow {"foo":true} to convert
+	// to emptyobjectval, but unifying an object with an attribute with the
+	// empty object type should be an error because unifying to the empty
+	// object type would be surprising and useless.
+
+	firstAttrs := types[0].AttributeTypes()
+	for _, ty := range types[1:] {
+		thisAttrs := ty.AttributeTypes()
+		if len(thisAttrs) != len(firstAttrs) {
+			// If number of attributes is different then there can be no
+			// object type in common.
+			return unifyObjectTypesToMap(types, unsafe)
+		}
+		for name := range thisAttrs {
+			if _, ok := firstAttrs[name]; !ok {
+				// If attribute names don't exactly match then there can be
+				// no object type in common.
+				return unifyObjectTypesToMap(types, unsafe)
+			}
+		}
+	}
+
+	// If we get here then we've proven that all of the given object types
+	// have exactly the same set of attribute names, though the types may
+	// differ.
+	retAtys := make(map[string]cty.Type)
+	atysAcross := make([]cty.Type, len(types))
+	for name := range firstAttrs {
+		for i, ty := range types {
+			atysAcross[i] = ty.AttributeType(name)
+		}
+		retAtys[name], _ = unify(atysAcross, unsafe)
+		if retAtys[name] == cty.NilType {
+			// Cannot unify this attribute alone, which means that unification
+			// of everything down to a map type can't be possible either.
+			return cty.NilType, nil
+		}
+	}
+	retTy := cty.Object(retAtys)
+
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return unifyObjectTypesToMap(types, unsafe)
+		}
+	}
+
+	return retTy, conversions
+}
+
+func unifyObjectTypesToMap(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
+	// This is our fallback case for unifyObjectTypes, where we see if we can
+	// construct a map type that can accept all of the attribute types.
+
+	var atys []cty.Type
+	for _, ty := range types {
+		for _, aty := range ty.AttributeTypes() {
+			atys = append(atys, aty)
+		}
+	}
+
+	ety, _ := unify(atys, unsafe)
+	if ety == cty.NilType {
+		return cty.NilType, nil
+	}
+
+	retTy := cty.Map(ety)
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			return cty.NilType, nil
+		}
+	}
+	return retTy, conversions
+}
+
+func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
+	// If we had any dynamic types in the input here then we can't predict
+	// what path we'll take through here once these become known types, so
+	// we'll conservatively produce DynamicVal for these.
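+	// (Editorial illustration, not upstream code: unifying
+	// tuple([string, number]) with tuple([number, number]) succeeds below as
+	// tuple([string, number]) because number converts to string, while tuples
+	// of different lengths fall back to unifyTupleTypesToList.)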
+	if hasDynamic {
+		return unifyAllAsDynamic(types)
+	}
+
+	// There are two different ways we can succeed here:
+	// - If all of the given tuple types have the same number of elements
+	//   and the corresponding types are all unifyable, then we construct that
+	//   type.
+	// - If the given tuple types have different numbers of elements or their
+	//   corresponding types are not unifyable, we'll instead try to unify
+	//   all of the element types together to produce a list type.
+
+	firstEtys := types[0].TupleElementTypes()
+	for _, ty := range types[1:] {
+		thisEtys := ty.TupleElementTypes()
+		if len(thisEtys) != len(firstEtys) {
+			// If number of elements is different then there can be no
+			// tuple type in common.
+			return unifyTupleTypesToList(types, unsafe)
+		}
+	}
+
+	// If we get here then we've proven that all of the given tuple types
+	// have the same number of elements, though the types may differ.
+	retEtys := make([]cty.Type, len(firstEtys))
+	atysAcross := make([]cty.Type, len(types))
+	for idx := range firstEtys {
+		for tyI, ty := range types {
+			atysAcross[tyI] = ty.TupleElementTypes()[idx]
+		}
+		retEtys[idx], _ = unify(atysAcross, unsafe)
+		if retEtys[idx] == cty.NilType {
+			// Cannot unify this element alone, which means that unification
+			// of everything down to a list type can't be possible either.
+			return cty.NilType, nil
+		}
+	}
+	retTy := cty.Tuple(retEtys)
+
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return unifyTupleTypesToList(types, unsafe)
+		}
+	}
+
+	return retTy, conversions
+}
+
+func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
+	// This is our fallback case for unifyTupleTypes, where we see if we can
+	// construct a list type that can accept all of the element types.
+
+	var etys []cty.Type
+	for _, ty := range types {
+		for _, ety := range ty.TupleElementTypes() {
+			etys = append(etys, ety)
+		}
+	}
+
+	ety, _ := unify(etys, unsafe)
+	if ety == cty.NilType {
+		return cty.NilType, nil
+	}
+
+	retTy := cty.List(ety)
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify, but we'll
+			// fail out rather than calling the object/map fallback here since
+			// these are tuple types, not object types.
+			return cty.NilType, nil
+		}
+	}
+	return retTy, conversions
+}
+
+func unifyAllAsDynamic(types []cty.Type) (cty.Type, []Conversion) {
+	conversions := make([]Conversion, len(types))
+	for i := range conversions {
+		conversions[i] = func(cty.Value) (cty.Value, error) {
+			return cty.DynamicVal, nil
+		}
+	}
+	return cty.DynamicPseudoType, conversions
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/doc.go
index ac1dd8ce74..393b3110bc 100644
--- a/vendor/github.com/zclconf/go-cty/cty/function/doc.go
+++ b/vendor/github.com/zclconf/go-cty/cty/function/doc.go
@@ -1,6 +1,6 @@
 // Package function builds on the functionality of cty by modeling functions
 // that operate on cty Values.
 //
-// Functions are, at their call, Go anonymous functions. However, this package
+// Functions are, at their core, Go anonymous functions.
However, this package // wraps around them utility functions for parameter type checking, etc. package function diff --git a/vendor/github.com/zclconf/go-cty/cty/function/function.go b/vendor/github.com/zclconf/go-cty/cty/function/function.go index 162f7bfcde..9e8bf3376a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/function.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/function.go @@ -143,7 +143,7 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) val := posArgs[i] if val.IsNull() && !spec.AllowNull { - return cty.Type{}, NewArgErrorf(i, "must not be null") + return cty.Type{}, NewArgErrorf(i, "argument must not be null") } // AllowUnknown is ignored for type-checking, since we expect to be @@ -169,7 +169,7 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) realI := i + len(posArgs) if val.IsNull() && !spec.AllowNull { - return cty.Type{}, NewArgErrorf(realI, "must not be null") + return cty.Type{}, NewArgErrorf(realI, "argument must not be null") } if val.Type() == cty.DynamicPseudoType { diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go new file mode 100644 index 0000000000..aa15b7bde9 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go @@ -0,0 +1,385 @@ +package stdlib + +import ( + "bufio" + "bytes" + "fmt" + "strings" + "time" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var FormatDateFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + { + Name: "time", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + formatStr := args[0].AsString() + timeStr := args[1].AsString() + t, err := parseTimestamp(timeStr) + if err != nil { + return cty.DynamicVal, function.NewArgError(1, err) + } + + var buf bytes.Buffer + sc := bufio.NewScanner(strings.NewReader(formatStr)) + sc.Split(splitDateFormat) + const esc = '\'' + for sc.Scan() { + tok := sc.Bytes() + + // The leading byte signals the token type + switch { + case tok[0] == esc: + if tok[len(tok)-1] != esc || len(tok) == 1 { + return cty.DynamicVal, function.NewArgErrorf(0, "unterminated literal '") + } + if len(tok) == 2 { + // Must be a single escaped quote, '' + buf.WriteByte(esc) + } else { + // The content (until a closing esc) is printed out verbatim + // except that we must un-double any double-esc escapes in + // the middle of the string. 
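+					// (Editorial illustration: in the format "h 'o''clock'"
+					// the quoted token renders as the literal text o'clock,
+					// with the doubled '' collapsing to a single quote below.)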
+ raw := tok[1 : len(tok)-1] + for i := 0; i < len(raw); i++ { + buf.WriteByte(raw[i]) + if raw[i] == esc { + i++ // skip the escaped quote + } + } + } + + case startsDateFormatVerb(tok[0]): + switch tok[0] { + case 'Y': + y := t.Year() + switch len(tok) { + case 2: + fmt.Fprintf(&buf, "%02d", y%100) + case 4: + fmt.Fprintf(&buf, "%04d", y) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: year must either be \"YY\" or \"YYYY\"", tok) + } + case 'M': + m := t.Month() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", m) + case 2: + fmt.Fprintf(&buf, "%02d", m) + case 3: + buf.WriteString(m.String()[:3]) + case 4: + buf.WriteString(m.String()) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: month must be \"M\", \"MM\", \"MMM\", or \"MMMM\"", tok) + } + case 'D': + d := t.Day() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", d) + case 2: + fmt.Fprintf(&buf, "%02d", d) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: day of month must either be \"D\" or \"DD\"", tok) + } + case 'E': + d := t.Weekday() + switch len(tok) { + case 3: + buf.WriteString(d.String()[:3]) + case 4: + buf.WriteString(d.String()) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: day of week must either be \"EEE\" or \"EEEE\"", tok) + } + case 'h': + h := t.Hour() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", h) + case 2: + fmt.Fprintf(&buf, "%02d", h) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: 24-hour must either be \"h\" or \"hh\"", tok) + } + case 'H': + h := t.Hour() % 12 + if h == 0 { + h = 12 + } + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", h) + case 2: + fmt.Fprintf(&buf, "%02d", h) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: 12-hour must either be \"H\" or \"HH\"", tok) + } + case 'A', 'a': + if len(tok) != 2 { + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: must be \"%s%s\"", tok, tok[0:1], tok[0:1]) + } + upper := tok[0] == 'A' + switch t.Hour() / 12 { + case 0: + if upper { + buf.WriteString("AM") + } else { + buf.WriteString("am") + } + case 1: + if upper { + buf.WriteString("PM") + } else { + buf.WriteString("pm") + } + } + case 'm': + m := t.Minute() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", m) + case 2: + fmt.Fprintf(&buf, "%02d", m) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: minute must either be \"m\" or \"mm\"", tok) + } + case 's': + s := t.Second() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", s) + case 2: + fmt.Fprintf(&buf, "%02d", s) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: second must either be \"s\" or \"ss\"", tok) + } + case 'Z': + // We'll just lean on Go's own formatter for this one, since + // the necessary information is unexported. 
+ switch len(tok) { + case 1: + buf.WriteString(t.Format("Z07:00")) + case 3: + str := t.Format("-0700") + switch str { + case "+0000": + buf.WriteString("UTC") + default: + buf.WriteString(str) + } + case 4: + buf.WriteString(t.Format("-0700")) + case 5: + buf.WriteString(t.Format("-07:00")) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: timezone must be Z, ZZZZ, or ZZZZZ", tok) + } + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q", tok) + } + + default: + // Any other starting character indicates a literal sequence + buf.Write(tok) + } + } + + return cty.StringVal(buf.String()), nil + }, +}) + +// FormatDate reformats a timestamp given in RFC3339 syntax into another time +// syntax defined by a given format string. +// +// The format string uses letter mnemonics to represent portions of the +// timestamp, with repetition signifying length variants of each portion. +// Single quote characters ' can be used to quote sequences of literal letters +// that should not be interpreted as formatting mnemonics. +// +// The full set of supported mnemonic sequences is listed below: +// +// YY Year modulo 100 zero-padded to two digits, like "06". +// YYYY Four (or more) digit year, like "2006". +// M Month number, like "1" for January. +// MM Month number zero-padded to two digits, like "01". +// MMM English month name abbreviated to three letters, like "Jan". +// MMMM English month name unabbreviated, like "January". +// D Day of month number, like "2". +// DD Day of month number zero-padded to two digits, like "02". +// EEE English day of week name abbreviated to three letters, like "Mon". +// EEEE English day of week name unabbreviated, like "Monday". +// h 24-hour number, like "2". +// hh 24-hour number zero-padded to two digits, like "02". +// H 12-hour number, like "2". +// HH 12-hour number zero-padded to two digits, like "02". +// AA Hour AM/PM marker in uppercase, like "AM". +// aa Hour AM/PM marker in lowercase, like "am". +// m Minute within hour, like "5". +// mm Minute within hour zero-padded to two digits, like "05". +// s Second within minute, like "9". +// ss Second within minute zero-padded to two digits, like "09". +// ZZZZ Timezone offset with just sign and digit, like "-0800". +// ZZZZZ Timezone offset with colon separating hours and minutes, like "-08:00". +// Z Like ZZZZZ but with a special case "Z" for UTC. +// ZZZ Like ZZZZ but with a special case "UTC" for UTC. +// +// The format syntax is optimized mainly for generating machine-oriented +// timestamps rather than human-oriented timestamps; the English language +// portions of the output reflect the use of English names in a number of +// machine-readable date formatting standards. For presentation to humans, +// a locale-aware time formatter (not included in this package) is a better +// choice. +// +// The format syntax is not compatible with that of any other language, but +// is optimized so that patterns for common standard date formats can be +// recognized quickly even by a reader unfamiliar with the format syntax. 
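[Editorial aside, not part of the patch: a minimal runnable sketch of the exported wrapper below, assuming the vendored package resolves as github.com/zclconf/go-cty/cty/function/stdlib. The expected output follows from the verb handling above.]

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// "EEEE, DD MMM YYYY hh:mm ZZZ" exercises the weekday, day, month,
	// year, 24-hour time, and timezone verbs documented above.
	v, err := stdlib.FormatDate(
		cty.StringVal("EEEE, DD MMM YYYY hh:mm ZZZ"),
		cty.StringVal("2019-04-18T11:31:45-07:00"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString()) // Thursday, 18 Apr 2019 11:31 -0700
}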
+func FormatDate(format cty.Value, timestamp cty.Value) (cty.Value, error) {
+	return FormatDateFunc.Call([]cty.Value{format, timestamp})
+}
+
+func parseTimestamp(ts string) (time.Time, error) {
+	t, err := time.Parse(time.RFC3339, ts)
+	if err != nil {
+		switch err := err.(type) {
+		case *time.ParseError:
+			// If err is a time.ParseError then its string representation is not
+			// appropriate since it relies on details of Go's strange date format
+			// representation, which a caller of our functions is not expected
+			// to be familiar with.
+			//
+			// Therefore we do some light transformation to get a more suitable
+			// error that should make more sense to our callers. These are
+			// still not awesome error messages, but at least they refer to
+			// the timestamp portions by name rather than by Go's example
+			// values.
+			if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" {
+				// For some reason err.Message is populated with a ": " prefix
+				// by the time package.
+				return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message)
+			}
+			var what string
+			switch err.LayoutElem {
+			case "2006":
+				what = "year"
+			case "01":
+				what = "month"
+			case "02":
+				what = "day of month"
+			case "15":
+				what = "hour"
+			case "04":
+				what = "minute"
+			case "05":
+				what = "second"
+			case "Z07:00":
+				what = "UTC offset"
+			case "T":
+				return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'")
+			case ":", "-":
+				if err.ValueElem == "" {
+					return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem)
+				} else {
+					return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem)
+				}
+			default:
+				// Should never get here, because time.RFC3339 includes only the
+				// above portions, but since that might change in future we'll
+				// be robust here.
+				what = "timestamp segment"
+			}
+			if err.ValueElem == "" {
+				return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what)
+			} else {
+				return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what)
+			}
+		}
+		return time.Time{}, err
+	}
+	return t, nil
+}
+
+// splitDateFormat is a bufio.SplitFunc used to tokenize a date format.
+func splitDateFormat(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if len(data) == 0 {
+		return 0, nil, nil
+	}
+
+	const esc = '\''
+
+	switch {
+
+	case data[0] == esc:
+		// If we have another quote immediately after then this is a single
+		// escaped escape.
+		if len(data) > 1 && data[1] == esc {
+			return 2, data[:2], nil
+		}
+
+		// Beginning of quoted sequence, so we will seek forward until we find
+		// the closing quote, ignoring escaped quotes along the way.
+		for i := 1; i < len(data); i++ {
+			if data[i] == esc {
+				if (i + 1) == len(data) {
+					// We need at least one more byte to decide if this is an
+					// escape or a terminator.
+					return 0, nil, nil
+				}
+				if data[i+1] == esc {
+					i++ // doubled-up quotes are an escape sequence
+					continue
+				}
+				// We've found the closing quote
+				return i + 1, data[:i+1], nil
+			}
+		}
+		// If we fall out here then we need more bytes to find the end,
+		// unless we're already at the end with an unclosed quote.
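+		// (Editorial note: a format such as "'h" that ends with an unclosed
+		// quote reaches the atEOF branch below; the unterminated token is
+		// returned whole so FormatDateFunc can report "unterminated literal '".)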
+ if atEOF { + return len(data), data, nil + } + return 0, nil, nil + + case startsDateFormatVerb(data[0]): + rep := data[0] + for i := 1; i < len(data); i++ { + if data[i] != rep { + return i, data[:i], nil + } + } + if atEOF { + return len(data), data, nil + } + // We need more data to decide if we've found the end + return 0, nil, nil + + default: + for i := 1; i < len(data); i++ { + if data[i] == esc || startsDateFormatVerb(data[i]) { + return i, data[:i], nil + } + } + // We might not actually be at the end of a literal sequence, + // but that doesn't matter since we'll concat them back together + // anyway. + return len(data), data, nil + } +} + +func startsDateFormatVerb(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go index 86876ba9fe..32b1ac9712 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go @@ -11,9 +11,10 @@ import ( "unicode/utf8" "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" ) -// line 20 "format_fsm.go" +// line 21 "format_fsm.go" var _formatfsm_actions []byte = []byte{ 0, 1, 0, 1, 1, 1, 2, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8, @@ -86,15 +87,16 @@ const formatfsm_error int = 0 const formatfsm_en_main int = 8 -// line 19 "format_fsm.rl" +// line 20 "format_fsm.rl" func formatFSM(format string, a []cty.Value) (string, error) { var buf bytes.Buffer data := format nextArg := 1 // arg numbers are 1-based var verb formatVerb + highestArgIdx := 0 // zero means "none", since arg numbers are 1-based - // line 153 "format_fsm.rl" + // line 159 "format_fsm.rl" // Ragel state p := 0 // "Pointer" into data @@ -109,12 +111,12 @@ func formatFSM(format string, a []cty.Value) (string, error) { _ = te _ = eof - // line 121 "format_fsm.go" + // line 123 "format_fsm.go" { cs = formatfsm_start } - // line 126 "format_fsm.go" + // line 128 "format_fsm.go" { var _klen int var _trans int @@ -195,7 +197,7 @@ func formatFSM(format string, a []cty.Value) (string, error) { _acts++ switch _formatfsm_actions[_acts-1] { case 0: - // line 29 "format_fsm.rl" + // line 31 "format_fsm.rl" verb = formatVerb{ ArgNum: nextArg, @@ -205,12 +207,12 @@ func formatFSM(format string, a []cty.Value) (string, error) { ts = p case 1: - // line 38 "format_fsm.rl" + // line 40 "format_fsm.rl" buf.WriteByte(data[p]) case 4: - // line 49 "format_fsm.rl" + // line 51 "format_fsm.rl" // We'll try to slurp a whole UTF-8 sequence here, to give the user // better feedback. 
@@ -218,85 +220,89 @@ func formatFSM(format string, a []cty.Value) (string, error) { return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) case 5: - // line 56 "format_fsm.rl" + // line 58 "format_fsm.rl" verb.Sharp = true case 6: - // line 59 "format_fsm.rl" + // line 61 "format_fsm.rl" verb.Zero = true case 7: - // line 62 "format_fsm.rl" + // line 64 "format_fsm.rl" verb.Minus = true case 8: - // line 65 "format_fsm.rl" + // line 67 "format_fsm.rl" verb.Plus = true case 9: - // line 68 "format_fsm.rl" + // line 70 "format_fsm.rl" verb.Space = true case 10: - // line 72 "format_fsm.rl" + // line 74 "format_fsm.rl" verb.ArgNum = 0 case 11: - // line 75 "format_fsm.rl" + // line 77 "format_fsm.rl" verb.ArgNum = (10 * verb.ArgNum) + (int(data[p]) - '0') case 12: - // line 79 "format_fsm.rl" + // line 81 "format_fsm.rl" verb.HasWidth = true case 13: - // line 82 "format_fsm.rl" + // line 84 "format_fsm.rl" verb.Width = 0 case 14: - // line 85 "format_fsm.rl" + // line 87 "format_fsm.rl" verb.Width = (10 * verb.Width) + (int(data[p]) - '0') case 15: - // line 89 "format_fsm.rl" + // line 91 "format_fsm.rl" verb.HasPrec = true case 16: - // line 92 "format_fsm.rl" + // line 94 "format_fsm.rl" verb.Prec = 0 case 17: - // line 95 "format_fsm.rl" + // line 97 "format_fsm.rl" verb.Prec = (10 * verb.Prec) + (int(data[p]) - '0') case 18: - // line 99 "format_fsm.rl" + // line 101 "format_fsm.rl" verb.Mode = rune(data[p]) te = p + 1 verb.Raw = data[ts:te] verb.Offset = ts + if verb.ArgNum > highestArgIdx { + highestArgIdx = verb.ArgNum + } + err := formatAppend(&verb, &buf, a) if err != nil { return buf.String(), err } nextArg = verb.ArgNum + 1 - // line 324 "format_fsm.go" + // line 330 "format_fsm.go" } } @@ -319,22 +325,22 @@ func formatFSM(format string, a []cty.Value) (string, error) { __acts++ switch _formatfsm_actions[__acts-1] { case 2: - // line 42 "format_fsm.rl" + // line 44 "format_fsm.rl" case 3: - // line 45 "format_fsm.rl" + // line 47 "format_fsm.rl" return buf.String(), fmt.Errorf("invalid format string starting at offset %d", p) case 4: - // line 49 "format_fsm.rl" + // line 51 "format_fsm.rl" // We'll try to slurp a whole UTF-8 sequence here, to give the user // better feedback. r, _ := utf8.DecodeRuneInString(data[p:]) return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) - // line 363 "format_fsm.go" + // line 369 "format_fsm.go" } } } @@ -344,14 +350,24 @@ func formatFSM(format string, a []cty.Value) (string, error) { } } - // line 171 "format_fsm.rl" + // line 177 "format_fsm.rl" // If we fall out here without being in a final state then we've // encountered something that the scanner can't match, which should // be impossible (the scanner matches all bytes _somehow_) but we'll // flag it anyway rather than just losing data from the end. 
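+	// (Editorial illustration of the check added below: with format "%s" and
+	// two args, highestArgIdx ends at 1 while len(a) is 2, so the call fails
+	// with `too many arguments; only 1 used by format string` on argument 2.)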
if cs < formatfsm_first_final { - return buf.String(), fmt.Errorf("extraneous characters beginning at offset %i", p) + return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p) + } + + if highestArgIdx < len(a) { + // Extraneous args are an error, to more easily detect mistakes + firstBad := highestArgIdx + 1 + if highestArgIdx == 0 { + // Custom error message for this case + return buf.String(), function.NewArgErrorf(firstBad, "too many arguments; no verbs in format string") + } + return buf.String(), function.NewArgErrorf(firstBad, "too many arguments; only %d used by format string", highestArgIdx) } return buf.String(), nil diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl index 85d43bb77f..3c642d9e14 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl @@ -12,6 +12,7 @@ import ( "unicode/utf8" "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" ) %%{ @@ -23,6 +24,7 @@ func formatFSM(format string, a []cty.Value) (string, error) { data := format nextArg := 1 // arg numbers are 1-based var verb formatVerb + highestArgIdx := 0 // zero means "none", since arg numbers are 1-based %%{ @@ -102,6 +104,10 @@ func formatFSM(format string, a []cty.Value) (string, error) { verb.Raw = data[ts:te] verb.Offset = ts + if verb.ArgNum > highestArgIdx { + highestArgIdx = verb.ArgNum + } + err := formatAppend(&verb, &buf, a) if err != nil { return buf.String(), err @@ -175,7 +181,17 @@ func formatFSM(format string, a []cty.Value) (string, error) { // be impossible (the scanner matches all bytes _somehow_) but we'll // flag it anyway rather than just losing data from the end. if cs < formatfsm_first_final { - return buf.String(), fmt.Errorf("extraneous characters beginning at offset %i", p) + return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p) + } + + if highestArgIdx < len(a) { + // Extraneous args are an error, to more easily detect mistakes + firstBad := highestArgIdx+1 + if highestArgIdx == 0 { + // Custom error message for this case + return buf.String(), function.NewArgErrorf(firstBad, "too many arguments; no verbs in format string") + } + return buf.String(), function.NewArgErrorf(firstBad, "too many arguments; only %d used by format string", highestArgIdx) } return buf.String(), nil diff --git a/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go b/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go new file mode 100644 index 0000000000..3495550af5 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go @@ -0,0 +1,31 @@ +package function + +import ( + "github.com/zclconf/go-cty/cty" +) + +// Unpredictable wraps a given function such that it retains the same arguments +// and type checking behavior but will return an unknown value when called. +// +// It is recommended that most functions be "pure", which is to say that they +// will always produce the same value given particular input. However, +// sometimes it is necessary to offer functions whose behavior depends on +// some external state, such as reading a file or determining the current time. +// In such cases, an unpredictable wrapper might be used to stand in for +// the function during some sort of prior "checking" phase in order to delay +// the actual effect until later. 
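+//
+// For example (editorial illustration): a function that reads a file could be
+// wrapped with Unpredictable during a validation-only phase, so argument type
+// checking still runs but calls return cty.UnknownVal of the declared return
+// type instead of touching the filesystem.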
+// +// While Unpredictable can support a function that isn't pure in its +// implementation, it still expects a function to be pure in its type checking +// behavior, except for the special case of returning cty.DynamicPseudoType +// if it is not yet able to predict its return value based on current argument +// information. +func Unpredictable(f Function) Function { + newSpec := *f.spec // shallow copy + newSpec.Impl = unpredictableImpl + return New(&newSpec) +} + +func unpredictableImpl(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.UnknownVal(retType), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gob.go b/vendor/github.com/zclconf/go-cty/cty/gob.go index 3d731993bc..a77dace27e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/gob.go +++ b/vendor/github.com/zclconf/go-cty/cty/gob.go @@ -103,11 +103,11 @@ func (t *Type) GobDecode(buf []byte) error { // Capsule types cannot currently be gob-encoded, because they rely on pointer // equality and we have no way to recover the original pointer on decode. func (t *capsuleType) GobEncode() ([]byte, error) { - return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName()) + return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName)) } func (t *capsuleType) GobDecode() ([]byte, error) { - return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName()) + return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName)) } type gobValue struct { diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/in.go b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go index 642501b25b..ca9de21d2e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/gocty/in.go +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go @@ -5,6 +5,7 @@ import ( "reflect" "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" "github.com/zclconf/go-cty/cty/set" ) @@ -32,6 +33,11 @@ func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) { } func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) { + if val != (reflect.Value{}) && val.Type().AssignableTo(valueType) { + // If the source value is a cty.Value then we'll try to just pass + // through to the target type directly. + return toCtyPassthrough(val, ty, path) + } switch ty { case cty.Bool: @@ -505,6 +511,20 @@ func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) { } +func toCtyPassthrough(wrappedVal reflect.Value, wantTy cty.Type, path cty.Path) (cty.Value, error) { + if wrappedVal = toCtyUnwrapPointer(wrappedVal); !wrappedVal.IsValid() { + return cty.NullVal(wantTy), nil + } + + givenVal := wrappedVal.Interface().(cty.Value) + + val, err := convert.Convert(givenVal, wantTy) + if err != nil { + return cty.NilVal, path.NewErrorf("unsuitable value: %s", err) + } + return val, nil +} + // toCtyUnwrapPointer is a helper for dealing with Go pointers. 
It has three // possible outcomes: // diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go index 1a973066e4..0fa13f6c53 100644 --- a/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go +++ b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go @@ -138,7 +138,7 @@ func impliedObjectType(dec *json.Decoder) (cty.Type, error) { } func impliedTupleType(dec *json.Decoder) (cty.Type, error) { - // By the time we get in here, we've already consumed the { delimiter + // By the time we get in here, we've already consumed the [ delimiter // and so our next token should be the first value. var etys []cty.Type @@ -150,10 +150,9 @@ func impliedTupleType(dec *json.Decoder) (cty.Type, error) { } if ttok, ok := tok.(json.Delim); ok { - if rune(ttok) != ']' { - return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok) + if rune(ttok) == ']' { + break } - break } ety, err := impliedTypeForTok(tok, dec) diff --git a/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go index 155f0b8a1a..38106455fe 100644 --- a/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go +++ b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go @@ -72,7 +72,7 @@ func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, e } switch v := tok.(type) { case string: - val, err := convert.Convert(cty.StringVal(v), t) + val, err := cty.ParseNumberVal(v) if err != nil { return cty.NilVal, path.NewError(err) } diff --git a/vendor/github.com/zclconf/go-cty/cty/list_type.go b/vendor/github.com/zclconf/go-cty/cty/list_type.go index eadc865e54..2ef02a12f3 100644 --- a/vendor/github.com/zclconf/go-cty/cty/list_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/list_type.go @@ -33,8 +33,14 @@ func (t typeList) Equals(other Type) bool { return t.ElementTypeT.Equals(ot.ElementTypeT) } -func (t typeList) FriendlyName() string { - return "list of " + t.ElementTypeT.FriendlyName() +func (t typeList) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "list of " + elemName } func (t typeList) ElementType() Type { diff --git a/vendor/github.com/zclconf/go-cty/cty/map_type.go b/vendor/github.com/zclconf/go-cty/cty/map_type.go index ae9abae040..82d36c6282 100644 --- a/vendor/github.com/zclconf/go-cty/cty/map_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/map_type.go @@ -33,8 +33,14 @@ func (t typeMap) Equals(other Type) bool { return t.ElementTypeT.Equals(ot.ElementTypeT) } -func (t typeMap) FriendlyName() string { - return "map of " + t.ElementTypeT.FriendlyName() +func (t typeMap) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "map of " + elemName } func (t typeMap) ElementType() Type { diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go new file mode 100644 index 0000000000..1eb99f28a3 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go @@ -0,0 +1,14 @@ +// Package msgpack provides functions for serializing cty values in the +// msgpack encoding, and decoding them again. 
+// +// If the same type information is provided both at encoding and decoding time +// then values can be round-tripped without loss, except for capsule types +// which are not currently supported. +// +// If any unknown values are passed to Marshal then they will be represented +// using a msgpack extension with type code zero, which is understood by +// the Unmarshal function within this package but will not be understood by +// a generic (non-cty-aware) msgpack decoder. Ensure that no unknown values +// are used if interoperability with other msgpack implementations is +// required. +package msgpack diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go new file mode 100644 index 0000000000..1b631d0a17 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go @@ -0,0 +1,31 @@ +package msgpack + +import ( + "bytes" + + "github.com/vmihailenco/msgpack" + "github.com/zclconf/go-cty/cty" +) + +type dynamicVal struct { + Value cty.Value + Path cty.Path +} + +func (dv *dynamicVal) MarshalMsgpack() ([]byte, error) { + // Rather than defining a msgpack-specific serialization of types, + // instead we use the existing JSON serialization. + typeJSON, err := dv.Value.Type().MarshalJSON() + if err != nil { + return nil, dv.Path.NewErrorf("failed to serialize type: %s", err) + } + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + enc.EncodeArrayLen(2) + enc.EncodeBytes(typeJSON) + err = marshal(dv.Value, dv.Value.Type(), dv.Path, enc) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go new file mode 100644 index 0000000000..6db0815e44 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go @@ -0,0 +1,8 @@ +package msgpack + +import ( + "math" +) + +var negativeInfinity = math.Inf(-1) +var positiveInfinity = math.Inf(1) diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go new file mode 100644 index 0000000000..87b096ca4a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go @@ -0,0 +1,207 @@ +package msgpack + +import ( + "bytes" + "math/big" + "sort" + + "github.com/vmihailenco/msgpack" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// Marshal produces a msgpack serialization of the given value that +// can be decoded into the given type later using Unmarshal. +// +// The given value must conform to the given type, or an error will +// be returned. +func Marshal(val cty.Value, ty cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(ty) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, ty) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + var path cty.Path + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + + err := marshal(val, ty, path, enc) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error { + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. 
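+	// (Editorial illustration: cty.StringVal("a") marshaled as
+	// cty.DynamicPseudoType becomes a two-element msgpack array holding the
+	// JSON type serialization "string" followed by the value itself; see
+	// marshalDynamic and the dynamicVal type above.)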
+ if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, enc) + } + + if !val.IsKnown() { + err := enc.Encode(unknownVal) + if err != nil { + return path.NewError(err) + } + return nil + } + if val.IsNull() { + err := enc.EncodeNil() + if err != nil { + return path.NewError(err) + } + return nil + } + + // The caller should've guaranteed that the given val is conformant with + // the given type ty, so we'll proceed under that assumption here. + switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + err := enc.EncodeString(val.AsString()) + if err != nil { + return path.NewError(err) + } + return nil + case cty.Number: + var err error + switch { + case val.RawEquals(cty.PositiveInfinity): + err = enc.EncodeFloat64(positiveInfinity) + case val.RawEquals(cty.NegativeInfinity): + err = enc.EncodeFloat64(negativeInfinity) + default: + bf := val.AsBigFloat() + if iv, acc := bf.Int64(); acc == big.Exact { + err = enc.EncodeInt(iv) + } else if fv, acc := bf.Float64(); acc == big.Exact { + err = enc.EncodeFloat64(fv) + } else { + err = enc.EncodeString(bf.Text('f', -1)) + } + } + if err != nil { + return path.NewError(err) + } + return nil + case cty.Bool: + err := enc.EncodeBool(val.True()) + if err != nil { + return path.NewError(err) + } + return nil + default: + panic("unsupported primitive type") + } + case ty.IsListType(), ty.IsSetType(): + enc.EncodeArrayLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsMapType(): + enc.EncodeMapLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, enc) + if err != nil { + return err + } + err = marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + enc.EncodeArrayLen(len(etys)) + for it.Next() { + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + i++ + } + return nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + enc.EncodeMapLen(len(names)) + + for _, k := range names { + aty := atys[k] + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, enc) + if err != nil { + return err + } + err = marshal(av, aty, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsCapsuleType(): + return path.NewErrorf("capsule types not supported for msgpack encoding") + default: + // should never happen + return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic 
type +// information for the given value. +func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error { + dv := dynamicVal{ + Value: val, + Path: path, + } + return enc.Encode(&dv) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go new file mode 100644 index 0000000000..6f6022e4d5 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "bytes" + "fmt" + "io" + + "github.com/vmihailenco/msgpack" + msgpackcodes "github.com/vmihailenco/msgpack/codes" + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// msgpack-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary msgpack without explicit cty Type +// information. +// +// The rules are as follows: +// +// msgpack strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// msgpack maps become cty object types, with the attributes defined by the +// map keys and the types of their values. +// +// msgpack arrays become cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any unknown values are similarly typed as DynamicPseudoType, because these +// do not carry type information on the wire. +// +// Any parse errors will be returned as an error, and the type will be the +// invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := msgpack.NewDecoder(r) + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + // We must now be at the end of the buffer + err = dec.Skip() + if err != io.EOF { + return ty, fmt.Errorf("extra bytes after msgpack value") + } + + return ty, nil +} + +func impliedType(dec *msgpack.Decoder) (cty.Type, error) { + // If this function returns with a nil error then it must have already + // consumed the next value from the decoder, since when called recursively + // the caller will be expecting to find a following value here. 
+
+	code, err := dec.PeekCode()
+	if err != nil {
+		return cty.NilType, err
+	}
+
+	switch {
+
+	case code == msgpackcodes.Nil || msgpackcodes.IsExt(code):
+		err := dec.Skip()
+		return cty.DynamicPseudoType, err
+
+	case code == msgpackcodes.True || code == msgpackcodes.False:
+		_, err := dec.DecodeBool()
+		return cty.Bool, err
+
+	case msgpackcodes.IsFixedNum(code):
+		_, err := dec.DecodeInt64()
+		return cty.Number, err
+
+	case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64:
+		_, err := dec.DecodeInt64()
+		return cty.Number, err
+
+	case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64:
+		_, err := dec.DecodeUint64()
+		return cty.Number, err
+
+	case code == msgpackcodes.Float || code == msgpackcodes.Double:
+		_, err := dec.DecodeFloat64()
+		return cty.Number, err
+
+	case msgpackcodes.IsString(code):
+		_, err := dec.DecodeString()
+		return cty.String, err
+
+	case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32:
+		return impliedObjectType(dec)
+
+	case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32:
+		return impliedTupleType(dec)
+
+	default:
+		return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code)
+	}
+}
+
+func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) {
+	// If we get in here then we've already peeked the next code and know
+	// it's some sort of map.
+	l, err := dec.DecodeMapLen()
+	if err != nil {
+		return cty.NilType, err
+	}
+
+	var atys map[string]cty.Type
+
+	for i := 0; i < l; i++ {
+		// Read the map key first. We require map keys to be strings, but
+		// msgpack doesn't so we're prepared to error here if not.
+		k, err := dec.DecodeString()
+		if err != nil {
+			return cty.DynamicPseudoType, err
+		}
+
+		aty, err := impliedType(dec)
+		if err != nil {
+			return cty.DynamicPseudoType, err
+		}
+
+		if atys == nil {
+			atys = make(map[string]cty.Type)
+		}
+		atys[k] = aty
+	}
+
+	if len(atys) == 0 {
+		return cty.EmptyObject, nil
+	}
+
+	return cty.Object(atys), nil
+}
+
+func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) {
+	// If we get in here then we've already peeked the next code and know
+	// it's some sort of array.
+	l, err := dec.DecodeArrayLen()
+	if err != nil {
+		return cty.NilType, err
+	}
+
+	if l == 0 {
+		return cty.EmptyTuple, nil
+	}
+
+	etys := make([]cty.Type, l)
+
+	for i := 0; i < l; i++ {
+		ety, err := impliedType(dec)
+		if err != nil {
+			return cty.DynamicPseudoType, err
+		}
+		etys[i] = ety
+	}
+
+	return cty.Tuple(etys), nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go
new file mode 100644
index 0000000000..6507bc4be0
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go
@@ -0,0 +1,16 @@
+package msgpack
+
+type unknownType struct{}
+
+var unknownVal = unknownType{}
+
+// unknownValBytes is the raw bytes of the msgpack fixext1 value we
+// write to represent an unknown value. It's an extension value of
+// type zero whose value is irrelevant. Since it's irrelevant, we
+// set it to a single byte whose value is also zero, since that's
+// the most compact possible representation.
+var unknownValBytes = []byte{0xd4, 0, 0} + +func (uv unknownType) MarshalMsgpack() ([]byte, error) { + return unknownValBytes, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go new file mode 100644 index 0000000000..51bb76a8a7 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go @@ -0,0 +1,334 @@ +package msgpack + +import ( + "bytes" + + "github.com/vmihailenco/msgpack" + msgpackCodes "github.com/vmihailenco/msgpack/codes" + "github.com/zclconf/go-cty/cty" +) + +// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of +// the given type, returning the result. +// +// If an error is returned, the error is written with a hypothetical +// end-user that wrote the msgpack file as its audience, using cty type +// system concepts rather than Go type system concepts. +func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) { + r := bytes.NewReader(b) + dec := msgpack.NewDecoder(r) + + var path cty.Path + return unmarshal(dec, ty, path) +} + +func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + if msgpackCodes.IsExt(peek) { + // We just assume _all_ extensions are unknown values, + // since we don't have any other extensions. + dec.Skip() // skip what we've peeked + return cty.UnknownVal(ty), nil + } + if ty == cty.DynamicPseudoType { + return unmarshalDynamic(dec, path) + } + if peek == msgpackCodes.Nil { + dec.Skip() // skip what we've peeked + return cty.NullVal(ty), nil + } + + switch { + case ty.IsPrimitiveType(): + val, err := unmarshalPrimitive(dec, ty, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case ty.IsListType(): + return unmarshalList(dec, ty.ElementType(), path) + case ty.IsSetType(): + return unmarshalSet(dec, ty.ElementType(), path) + case ty.IsMapType(): + return unmarshalMap(dec, ty.ElementType(), path) + case ty.IsTupleType(): + return unmarshalTuple(dec, ty.TupleElementTypes(), path) + case ty.IsObjectType(): + return unmarshalObject(dec, ty.AttributeTypes(), path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName()) + } +} + +func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + switch ty { + case cty.Bool: + rv, err := dec.DecodeBool() + if err != nil { + return cty.DynamicVal, path.NewErrorf("bool is required") + } + return cty.BoolVal(rv), nil + case cty.Number: + // Marshal will try int and float first, if the value can be + // losslessly represented in these encodings, and then fall + // back on a string if the number is too large or too precise. 
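+		// (Editorial illustration: 2^64+1 = 18446744073709551617 fits neither
+		// int64 nor an exact float64, so Marshal writes it as that decimal
+		// string and the default case below recovers it via cty.ParseNumberVal.)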
+		peek, err := dec.PeekCode()
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("number is required")
+		}
+
+		if msgpackCodes.IsFixedNum(peek) {
+			rv, err := dec.DecodeInt64()
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			return cty.NumberIntVal(rv), nil
+		}
+
+		switch peek {
+		case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64:
+			rv, err := dec.DecodeInt64()
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			return cty.NumberIntVal(rv), nil
+		case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64:
+			rv, err := dec.DecodeUint64()
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			return cty.NumberUIntVal(rv), nil
+		case msgpackCodes.Float, msgpackCodes.Double:
+			rv, err := dec.DecodeFloat64()
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			return cty.NumberFloatVal(rv), nil
+		default:
+			rv, err := dec.DecodeString()
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			v, err := cty.ParseNumberVal(rv)
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			return v, nil
+		}
+	case cty.String:
+		rv, err := dec.DecodeString()
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("string is required")
+		}
+		return cty.StringVal(rv), nil
+	default:
+		// should never happen
+		panic("unsupported primitive type")
+	}
+}
+
+func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeArrayLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("a list is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.List(ety)), nil
+	case length == 0:
+		return cty.ListValEmpty(ety), nil
+	}
+
+	vals := make([]cty.Value, 0, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.NumberIntVal(int64(i)),
+		}
+
+		val, err := unmarshal(dec, ety, path)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+
+		vals = append(vals, val)
+	}
+
+	return cty.ListVal(vals), nil
+}
+
+func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeArrayLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("a set is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.Set(ety)), nil
+	case length == 0:
+		return cty.SetValEmpty(ety), nil
+	}
+
+	vals := make([]cty.Value, 0, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.NumberIntVal(int64(i)),
+		}
+
+		val, err := unmarshal(dec, ety, path)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+
+		vals = append(vals, val)
+	}
+
+	return cty.SetVal(vals), nil
+}
+
+func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeMapLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("a map is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.Map(ety)), nil
+	case length == 0:
+		return cty.MapValEmpty(ety), nil
+	}
+
+	vals := make(map[string]cty.Value, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		key, err := dec.DecodeString()
+		if err != nil {
+			return cty.DynamicVal, path[:len(path)-1].NewErrorf("non-string key in map")
+		}
+
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.StringVal(key),
+		}
+
+		val, err := unmarshal(dec,
ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a tuple is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Tuple(etys)), nil + case length == 0: + return cty.TupleVal(nil), nil + case length != len(etys): + return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys)) + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + ety := etys[i] + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("an object is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Object(atys)), nil + case length == 0: + return cty.ObjectVal(nil), nil + case length != len(atys): + return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)", + len(atys), length) + } + + vals := make(map[string]cty.Value, length) + path = append(path, nil) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings") + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(key), + } + aty, exists := atys[key] + if !exists { + return cty.DynamicVal, path.NewErrorf("unsupported attribute") + } + + val, err := unmarshal(dec, aty, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + switch { + case length == -1: + return cty.NullVal(cty.DynamicPseudoType), nil + case length != 2: + return cty.DynamicVal, path.NewErrorf( + "dynamic value array must have exactly two elements", + ) + } + + typeJSON, err := dec.DecodeBytes() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + var ty cty.Type + err = (&ty).UnmarshalJSON(typeJSON) + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + return unmarshal(dec, ty, path) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/object_type.go b/vendor/github.com/zclconf/go-cty/cty/object_type.go index 2540883ca7..187d38751b 100644 --- a/vendor/github.com/zclconf/go-cty/cty/object_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/object_type.go @@ -51,7 +51,7 @@ func (t typeObject) Equals(other Type) bool { return false } -func (t typeObject) FriendlyName() string { +func (t typeObject) FriendlyName(mode friendlyTypeNameMode) string { // There isn't really a friendly way to write an object type due to its // complexity, so we'll just do something English-ish. 
Callers will // probably want to make some extra effort to avoid ever printing out diff --git a/vendor/github.com/zclconf/go-cty/cty/path.go b/vendor/github.com/zclconf/go-cty/cty/path.go index d38c2231c6..625bfdf854 100644 --- a/vendor/github.com/zclconf/go-cty/cty/path.go +++ b/vendor/github.com/zclconf/go-cty/cty/path.go @@ -15,6 +15,10 @@ import ( // but callers can also feel free to just produce a slice of PathStep manually // and convert to this type, which may be more appropriate in environments // where memory pressure is a concern. +// +// Although a Path is technically mutable, by convention callers should not +// mutate a path once it has been built and passed to some other subsystem. +// Instead, use Copy and then mutate the copy before using it. type Path []PathStep // PathStep represents a single step down into a data structure, as part @@ -132,9 +136,13 @@ type IndexStep struct { // Apply returns the value resulting from indexing the given value with // our key value. func (s IndexStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot index a null value") + } + switch s.Key.Type() { case Number: - if !val.Type().IsListType() { + if !(val.Type().IsListType() || val.Type().IsTupleType()) { return NilVal, errors.New("not a list type") } case String: @@ -156,6 +164,10 @@ func (s IndexStep) Apply(val Value) (Value, error) { return val.Index(s.Key), nil } +func (s IndexStep) GoString() string { + return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key) +} + // GetAttrStep is a Step implementation representing retrieving an attribute // from a value, which must be of an object type. type GetAttrStep struct { @@ -166,6 +178,10 @@ type GetAttrStep struct { // Apply returns the value of our named attribute from the given value, which // must be of an object type that has a value of that name. func (s GetAttrStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot access attributes on a null value") + } + if !val.Type().IsObjectType() { return NilVal, errors.New("not an object type") } @@ -176,3 +192,7 @@ func (s GetAttrStep) Apply(val Value) (Value, error) { return val.GetAttr(s.Name), nil } + +func (s GetAttrStep) GoString() string { + return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/path_set.go b/vendor/github.com/zclconf/go-cty/cty/path_set.go new file mode 100644 index 0000000000..f1c892b9d9 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/path_set.go @@ -0,0 +1,198 @@ +package cty + +import ( + "fmt" + "hash/crc64" + + "github.com/zclconf/go-cty/cty/set" +) + +// PathSet represents a set of Path objects. This can be used, for example, +// to talk about a subset of paths within a value that meet some criteria, +// without directly modifying the values at those paths. +type PathSet struct { + set set.Set +} + +// NewPathSet creates and returns a PathSet, with initial contents optionally +// set by the given arguments. +func NewPathSet(paths ...Path) PathSet { + ret := PathSet{ + set: set.NewSet(pathSetRules{}), + } + + for _, path := range paths { + ret.Add(path) + } + + return ret +} + +// Add inserts a single given path into the set. +// +// Paths are immutable after construction by convention. It is particularly +// important not to mutate a path after it has been placed into a PathSet. +// If a Path is mutated while in a set, behavior is undefined. 
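+//
+// A minimal usage sketch (illustrative only):
+//
+//	ps := cty.NewPathSet()
+//	ps.Add(cty.Path{cty.GetAttrStep{Name: "foo"}})
+//	ps.Has(cty.Path{cty.GetAttrStep{Name: "foo"}}) // true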
+func (s PathSet) Add(path Path) {
+	s.set.Add(path)
+}
+
+// AddAllSteps is like Add but it also adds all of the steps leading to
+// the given path.
+//
+// For example, if given a path representing "foo.bar", it will add both
+// "foo" and "foo.bar".
+func (s PathSet) AddAllSteps(path Path) {
+	for i := 1; i <= len(path); i++ {
+		s.Add(path[:i])
+	}
+}
+
+// Has returns true if the given path is in the receiving set.
+func (s PathSet) Has(path Path) bool {
+	return s.set.Has(path)
+}
+
+// List makes and returns a slice of all of the paths in the receiving set,
+// in an undefined but consistent order.
+func (s PathSet) List() []Path {
+	if s.Empty() {
+		return nil
+	}
+	ret := make([]Path, 0, s.set.Length())
+	for it := s.set.Iterator(); it.Next(); {
+		ret = append(ret, it.Value().(Path))
+	}
+	return ret
+}
+
+// Remove modifies the receiving set to no longer include the given path.
+// If the given path was already absent, this is a no-op.
+func (s PathSet) Remove(path Path) {
+	s.set.Remove(path)
+}
+
+// Empty returns true if the length of the receiving set is zero.
+func (s PathSet) Empty() bool {
+	return s.set.Length() == 0
+}
+
+// Union returns a new set whose contents are the union of the receiver and
+// the given other set.
+func (s PathSet) Union(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.Union(other.set),
+	}
+}
+
+// Intersection returns a new set whose contents are the intersection of the
+// receiver and the given other set.
+func (s PathSet) Intersection(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.Intersection(other.set),
+	}
+}
+
+// Subtract returns a new set whose contents are those from the receiver with
+// any elements of the other given set subtracted.
+func (s PathSet) Subtract(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.Subtract(other.set),
+	}
+}
+
+// SymmetricDifference returns a new set whose contents are the symmetric
+// difference of the receiver and the given other set.
+func (s PathSet) SymmetricDifference(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.SymmetricDifference(other.set),
+	}
+}
+
+// Equal returns true if and only if both the receiver and the given other
+// set contain exactly the same paths.
+func (s PathSet) Equal(other PathSet) bool {
+	if s.set.Length() != other.set.Length() {
+		return false
+	}
+	// Now we know the lengths are the same, we only need to test in one
+	// direction whether everything in one is in the other.
+	for it := s.set.Iterator(); it.Next(); {
+		if !other.set.Has(it.Value()) {
+			return false
+		}
+	}
+	return true
+}
+
+var crc64Table = crc64.MakeTable(crc64.ISO)
+
+var indexStepPlaceholder = []byte("#")
+
+// pathSetRules is an implementation of set.Rules from the set package,
+// used internally within PathSet.
+type pathSetRules struct {
+}
+
+func (r pathSetRules) Hash(v interface{}) int {
+	path := v.(Path)
+	hash := crc64.New(crc64Table)
+
+	for _, rawStep := range path {
+		switch step := rawStep.(type) {
+		case GetAttrStep:
+			// (this creates some garbage converting the string name to a
+			// []byte, but that's okay since cty is not designed to be
+			// used in tight loops under memory pressure.)
+			hash.Write([]byte(step.Name))
+		default:
+			// For any other step type we just append a predefined value,
+			// which means that e.g. all indexes into a given collection will
+			// hash to the same value but we assume that collections are
+			// small and thus this won't hurt too much.
+ hash.Write(indexStepPlaceholder) + } + } + + // We discard half of the hash on 32-bit platforms; collisions just make + // our lookups take marginally longer, so not a big deal. + return int(hash.Sum64()) +} + +func (r pathSetRules) Equivalent(a, b interface{}) bool { + aPath := a.(Path) + bPath := b.(Path) + + if len(aPath) != len(bPath) { + return false + } + + for i := range aPath { + switch aStep := aPath[i].(type) { + case GetAttrStep: + bStep, ok := bPath[i].(GetAttrStep) + if !ok { + return false + } + + if aStep.Name != bStep.Name { + return false + } + case IndexStep: + bStep, ok := bPath[i].(IndexStep) + if !ok { + return false + } + + eq := aStep.Key.Equals(bStep.Key) + if !eq.IsKnown() || eq.False() { + return false + } + default: + // Should never happen, since we document PathStep as a closed type. + panic(fmt.Errorf("unsupported step type %T", aStep)) + } + } + + return true +} diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go index b8682dd3d1..7b3d1196cd 100644 --- a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go @@ -24,7 +24,7 @@ func (t primitiveType) Equals(other Type) bool { return false } -func (t primitiveType) FriendlyName() string { +func (t primitiveType) FriendlyName(mode friendlyTypeNameMode) string { switch t.Kind { case primitiveTypeBool: return "bool" diff --git a/vendor/github.com/zclconf/go-cty/cty/set_internals.go b/vendor/github.com/zclconf/go-cty/cty/set_internals.go index ce738dbe66..1d7a731aa7 100644 --- a/vendor/github.com/zclconf/go-cty/cty/set_internals.go +++ b/vendor/github.com/zclconf/go-cty/cty/set_internals.go @@ -19,12 +19,24 @@ type setRules struct { Type Type } +// Hash returns a hash value for the receiver that can be used for equality +// checks where some inaccuracy is tolerable. +// +// The hash function is value-type-specific, so it is not meaningful to compare +// hash results for values of different types. +// +// This function is not safe to use for security-related applications, since +// the hash used is not strong enough. 
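+//
+// For example (an illustrative sketch), equal values always hash equal,
+// while the converse does not hold:
+//
+//	cty.StringVal("a").Hash() == cty.StringVal("a").Hash() // always true
+//	cty.StringVal("a").Hash() == cty.StringVal("b").Hash() // possible collision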
+func (val Value) Hash() int { + hashBytes := makeSetHashBytes(val) + return int(crc32.ChecksumIEEE(hashBytes)) +} + func (r setRules) Hash(v interface{}) int { - hashBytes := makeSetHashBytes(Value{ + return Value{ ty: r.Type, v: v, - }) - return int(crc32.ChecksumIEEE(hashBytes)) + }.Hash() } func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool { diff --git a/vendor/github.com/zclconf/go-cty/cty/set_type.go b/vendor/github.com/zclconf/go-cty/cty/set_type.go index 952a2d2baa..cbc3706f2c 100644 --- a/vendor/github.com/zclconf/go-cty/cty/set_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/set_type.go @@ -31,8 +31,14 @@ func (t typeSet) Equals(other Type) bool { return t.ElementTypeT.Equals(ot.ElementTypeT) } -func (t typeSet) FriendlyName() string { - return "set of " + t.ElementTypeT.FriendlyName() +func (t typeSet) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "set of " + elemName } func (t typeSet) ElementType() Type { diff --git a/vendor/github.com/zclconf/go-cty/cty/tuple_type.go b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go index b98349e3a0..798cacd63a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/tuple_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go @@ -44,7 +44,7 @@ func (t typeTuple) Equals(other Type) bool { return false } -func (t typeTuple) FriendlyName() string { +func (t typeTuple) FriendlyName(mode friendlyTypeNameMode) string { // There isn't really a friendly way to write a tuple type due to its // complexity, so we'll just do something English-ish. Callers will // probably want to make some extra effort to avoid ever printing out diff --git a/vendor/github.com/zclconf/go-cty/cty/type.go b/vendor/github.com/zclconf/go-cty/cty/type.go index ae5f1c83ef..730cb9862e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/type.go +++ b/vendor/github.com/zclconf/go-cty/cty/type.go @@ -19,7 +19,7 @@ type typeImpl interface { // FriendlyName returns a human-friendly *English* name for the given // type. - FriendlyName() string + FriendlyName(mode friendlyTypeNameMode) string // GoString implements the GoStringer interface from package fmt. GoString() string @@ -41,7 +41,25 @@ func (t Type) Equals(other Type) bool { // FriendlyName returns a human-friendly *English* name for the given type. func (t Type) FriendlyName() string { - return t.typeImpl.FriendlyName() + return t.typeImpl.FriendlyName(friendlyTypeName) +} + +// FriendlyNameForConstraint is similar to FriendlyName except that the +// result is specialized for describing type _constraints_ rather than types +// themselves. This is more appropriate when reporting that a particular value +// does not conform to an expected type constraint. +// +// In particular, this function uses the term "any type" to refer to +// cty.DynamicPseudoType, rather than "dynamic" as returned by FriendlyName. +func (t Type) FriendlyNameForConstraint() string { + return t.typeImpl.FriendlyName(friendlyTypeConstraintName) +} + +// friendlyNameMode is an internal combination of the various FriendlyName* +// variants that just directly takes a mode, for easy passthrough for +// recursive name construction. 
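+//
+// For example (illustrative), cty.DynamicPseudoType.FriendlyName() returns
+// "dynamic", while cty.DynamicPseudoType.FriendlyNameForConstraint() returns
+// "any type", per the pseudoTypeDynamic implementation in unknown.go.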
+func (t Type) friendlyNameMode(mode friendlyTypeNameMode) string { + return t.typeImpl.FriendlyName(mode) } // GoString returns a string approximating how the receiver type would be @@ -93,3 +111,10 @@ func (t Type) HasDynamicTypes() bool { panic("HasDynamicTypes does not support the given type") } } + +type friendlyTypeNameMode rune + +const ( + friendlyTypeName friendlyTypeNameMode = 'N' + friendlyTypeConstraintName friendlyTypeNameMode = 'C' +) diff --git a/vendor/github.com/zclconf/go-cty/cty/type_conform.go b/vendor/github.com/zclconf/go-cty/cty/type_conform.go index b417dc79b4..476eeea87f 100644 --- a/vendor/github.com/zclconf/go-cty/cty/type_conform.go +++ b/vendor/github.com/zclconf/go-cty/cty/type_conform.go @@ -50,23 +50,20 @@ func testConformance(given Type, want Type, path Path, errs *[]error) { givenAttrs := given.AttributeTypes() wantAttrs := want.AttributeTypes() - if len(givenAttrs) != len(wantAttrs) { - // Something is missing from one of them. - for k := range givenAttrs { - if _, exists := wantAttrs[k]; !exists { - *errs = append( - *errs, - errorf(path, "unsupported attribute %q", k), - ) - } + for k := range givenAttrs { + if _, exists := wantAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "unsupported attribute %q", k), + ) } - for k := range wantAttrs { - if _, exists := givenAttrs[k]; !exists { - *errs = append( - *errs, - errorf(path, "missing required attribute %q", k), - ) - } + } + for k := range wantAttrs { + if _, exists := givenAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "missing required attribute %q", k), + ) } } diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown.go b/vendor/github.com/zclconf/go-cty/cty/unknown.go index 9f6fce99df..e54179eb14 100644 --- a/vendor/github.com/zclconf/go-cty/cty/unknown.go +++ b/vendor/github.com/zclconf/go-cty/cty/unknown.go @@ -54,8 +54,13 @@ func (t pseudoTypeDynamic) Equals(other Type) bool { return ok } -func (t pseudoTypeDynamic) FriendlyName() string { - return "dynamic" +func (t pseudoTypeDynamic) FriendlyName(mode friendlyTypeNameMode) string { + switch mode { + case friendlyTypeConstraintName: + return "any type" + default: + return "dynamic" + } } func (t pseudoTypeDynamic) GoString() string { diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go b/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go new file mode 100644 index 0000000000..ba926475ce --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go @@ -0,0 +1,64 @@ +package cty + +// UnknownAsNull returns a value of the same type as the given value but +// with any unknown values (including nested values) replaced with null +// values of the same type. +// +// This can be useful if a result is to be serialized in a format that can't +// represent unknowns, such as JSON, as long as the caller does not need to +// retain the unknown value information. 
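+//
+// A minimal sketch of the effect (illustrative):
+//
+//	v := cty.ObjectVal(map[string]cty.Value{
+//		"id": cty.UnknownVal(cty.String),
+//	})
+//	cty.UnknownAsNull(v) // returns an object whose "id" is cty.NullVal(cty.String)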
+func UnknownAsNull(val Value) Value {
+	ty := val.Type()
+	switch {
+	case val.IsNull():
+		return val
+	case !val.IsKnown():
+		return NullVal(ty)
+	case ty.IsListType() || ty.IsTupleType() || ty.IsSetType():
+		length := val.LengthInt()
+		if length == 0 {
+			// If there are no elements then we can't have unknowns
+			return val
+		}
+		vals := make([]Value, 0, length)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, v := it.Element()
+			vals = append(vals, UnknownAsNull(v))
+		}
+		switch {
+		case ty.IsListType():
+			return ListVal(vals)
+		case ty.IsTupleType():
+			return TupleVal(vals)
+		default:
+			return SetVal(vals)
+		}
+	case ty.IsMapType() || ty.IsObjectType():
+		var length int
+		switch {
+		case ty.IsMapType():
+			length = val.LengthInt()
+		default:
+			length = len(val.Type().AttributeTypes())
+		}
+		if length == 0 {
+			// If there are no elements then we can't have unknowns
+			return val
+		}
+		vals := make(map[string]Value, length)
+		it := val.ElementIterator()
+		for it.Next() {
+			k, v := it.Element()
+			vals[k.AsString()] = UnknownAsNull(v)
+		}
+		switch {
+		case ty.IsMapType():
+			return MapVal(vals)
+		default:
+			return ObjectVal(vals)
+		}
+	}
+
+	return val
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/value_init.go b/vendor/github.com/zclconf/go-cty/cty/value_init.go
index 495a83e610..a9abbbb06f 100644
--- a/vendor/github.com/zclconf/go-cty/cty/value_init.go
+++ b/vendor/github.com/zclconf/go-cty/cty/value_init.go
@@ -30,6 +30,32 @@ func NumberVal(v *big.Float) Value {
 	}
 }
 
+// ParseNumberVal returns a Value of type number produced by parsing the given
+// string as a decimal real number. To ensure that two identical strings will
+// always produce an equal number, always use this function to derive a number
+// from a string; it will ensure that the precision and rounding mode for the
+// internal big decimal is configured in a consistent way.
+//
+// If the given string cannot be parsed as a number, the returned error has
+// the message "a number is required", making it suitable to return to an
+// end-user to signal a type conversion error.
+//
+// If the given string contains a number that becomes a recurring fraction
+// when expressed in binary then it will be truncated to have a 512-bit
+// mantissa. Note that this is a higher precision than that of a float64,
+// so converting the same decimal number first to float64 and then calling
+// NumberFloatVal will not produce an equal result; the conversion first
+// to float64 will round the mantissa to fewer than 512 bits.
+func ParseNumberVal(s string) (Value, error) {
+	// Base 10, precision 512, and rounding to nearest even is the standard
+	// way to handle numbers arriving as strings.
+	f, _, err := big.ParseFloat(s, 10, 512, big.ToNearestEven)
+	if err != nil {
+		return NilVal, fmt.Errorf("a number is required")
+	}
+	return NumberVal(f), nil
+}
+
 // NumberIntVal returns a Value of type Number whose internal value is equal
 // to the given integer.
 func NumberIntVal(v int64) Value {
diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go
index 967aa761df..436f9b0b1a 100644
--- a/vendor/github.com/zclconf/go-cty/cty/value_ops.go
+++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go
@@ -14,16 +14,15 @@ func (val Value) GoString() string {
 		return "cty.NilVal"
 	}
 
-	if val.ty == DynamicPseudoType {
+	if val.IsNull() {
+		return fmt.Sprintf("cty.NullVal(%#v)", val.ty)
+	}
+	if val == DynamicVal { // is unknown, so must be before the IsKnown check below
 		return "cty.DynamicVal"
 	}
-
 	if !val.IsKnown() {
 		return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty)
 	}
-	if val.IsNull() {
-		return fmt.Sprintf("cty.NullVal(%#v)", val.ty)
-	}
 
 	// By the time we reach here we've dealt with all of the exceptions around
 	// unknowns and nulls, so we're guaranteed that the values are the
@@ -71,26 +70,67 @@ func (val Value) GoString() string {
 // Equals returns True if the receiver and the given other value have the
 // same type and are exactly equal in value.
 //
-// The usual short-circuit rules apply, so the result can be unknown or typed
-// as dynamic if either of the given values are. Use RawEquals to compare
-// if two values are equal *ignoring* the short-circuit rules.
+// As a special case, two null values are always equal regardless of type.
+//
+// The usual short-circuit rules apply, so the result will be unknown if
+// either of the given values are.
+//
+// Use RawEquals to compare if two values are equal *ignoring* the
+// short-circuit rules and the exception for null values.
 func (val Value) Equals(other Value) Value {
-	if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() {
+	// Start by handling Unknown values before considering types.
+	// This needs to be done since Null values are always equal regardless of
+	// type.
+	switch {
+	case !val.IsKnown() && !other.IsKnown():
+		// both unknown
 		return UnknownVal(Bool)
+	case val.IsKnown() && !other.IsKnown():
+		switch {
+		case val.IsNull(), other.ty.HasDynamicTypes():
+			// If the known value is null, we need to wait for the unknown
+			// value, since nulls of any type are equal.
+			// An unknown with a dynamic type compares as unknown, which we
+			// need to check before the type comparison below.
+			return UnknownVal(Bool)
+		case !val.ty.Equals(other.ty):
+			// There is no null comparison or dynamic types, so unequal types
+			// will never be equal.
+			return False
+		default:
+			return UnknownVal(Bool)
+		}
+	case other.IsKnown() && !val.IsKnown():
+		switch {
+		case other.IsNull(), val.ty.HasDynamicTypes():
+			// If the known value is null, we need to wait for the unknown
+			// value, since nulls of any type are equal.
+			// An unknown with a dynamic type compares as unknown, which we
+			// need to check before the type comparison below.
+			return UnknownVal(Bool)
+		case !other.ty.Equals(val.ty):
+			// There's no null comparison or dynamic types, so unequal types
+			// will never be equal.
+			return False
+		default:
+			return UnknownVal(Bool)
+		}
 	}
 
-	if !val.ty.Equals(other.ty) {
+	switch {
+	case val.IsNull() && other.IsNull():
+		// Nulls are always equal, regardless of type
+		return BoolVal(true)
+	case val.IsNull() || other.IsNull():
+		// If only one is null then the result must be false
 		return BoolVal(false)
 	}
 
-	if !(val.IsKnown() && other.IsKnown()) {
+	if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() {
 		return UnknownVal(Bool)
 	}
 
-	if val.IsNull() || other.IsNull() {
-		if val.IsNull() && other.IsNull() {
-			return BoolVal(true)
-		}
+	if !val.ty.Equals(other.ty) {
 		return BoolVal(false)
 	}
@@ -758,7 +798,7 @@ func (val Value) HasElement(elem Value) Value {
 	if val.IsNull() {
 		panic("can't call HasElement on a nil value")
 	}
-	if ty.ElementType() != elem.Type() {
+	if !ty.ElementType().Equals(elem.Type()) {
 		return False
 	}
 
@@ -800,6 +840,10 @@ func (val Value) LengthInt() int {
 		// For tuples, we can return the length even if the value is not known.
 		return val.Type().Length()
 	}
+	if val.Type().IsObjectType() {
+		// For objects, the length is the number of attributes associated with the type.
+		return len(val.Type().AttributeTypes())
+	}
 	if !val.IsKnown() {
 		panic("value is not known")
 	}
diff --git a/vendor/github.com/zclconf/go-cty/cty/walk.go b/vendor/github.com/zclconf/go-cty/cty/walk.go
new file mode 100644
index 0000000000..a6943babef
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/walk.go
@@ -0,0 +1,182 @@
+package cty
+
+// Walk visits all of the values in a possibly-complex structure, calling
+// a given function for each value.
+//
+// For example, given a list of strings the callback would first be called
+// with the whole list and then called once for each element of the list.
+//
+// The callback function may prevent recursive visits to child values by
+// returning false. The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
+func Walk(val Value, cb func(Path, Value) (bool, error)) error {
+	var path Path
+	return walk(path, val, cb)
+}
+
+func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error {
+	deeper, err := cb(path, val)
+	if err != nil {
+		return err
+	}
+	if !deeper {
+		return nil
+	}
+
+	if val.IsNull() || !val.IsKnown() {
+		// Can't recurse into null or unknown values, regardless of type
+		return nil
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsObjectType():
+		for it := val.ElementIterator(); it.Next(); {
+			nameVal, av := it.Element()
+			path := append(path, GetAttrStep{
+				Name: nameVal.AsString(),
+			})
+			err := walk(path, av, cb)
+			if err != nil {
+				return err
+			}
+		}
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			kv, ev := it.Element()
+			path := append(path, IndexStep{
+				Key: kv,
+			})
+			err := walk(path, ev, cb)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Transform visits all of the values in a possibly-complex structure,
+// calling a given function for each value which has an opportunity to
+// replace that value.
+//
+// Unlike Walk, Transform visits child nodes first, so for a list of strings
+// it would first visit the strings and then the _new_ list constructed
+// from the transformed values of the list items.
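+//
+// For example, a sketch that normalizes every string in a structure to upper
+// case (illustrative; assumes the strings package is imported, and leaves
+// null, unknown, and non-string values untouched):
+//
+//	upper, err := cty.Transform(val, func(p cty.Path, v cty.Value) (cty.Value, error) {
+//		if v.IsKnown() && !v.IsNull() && v.Type().Equals(cty.String) {
+//			return cty.StringVal(strings.ToUpper(v.AsString())), nil
+//		}
+//		return v, nil
+//	})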
+//
+// This is useful for creating the effect of being able to make deep mutations
+// to a value even though values are immutable. However, it's the responsibility
+// of the given function to preserve expected invariants, such as homogeneity of
+// element types in collections; this function can panic if such invariants
+// are violated, just as if new values were constructed directly using the
+// value constructor functions. An easy way to preserve invariants is to
+// ensure that the transform function never changes the value type.
+//
+// The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
+func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) {
+	var path Path
+	return transform(path, val, cb)
+}
+
+func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) {
+	ty := val.Type()
+	var newVal Value
+
+	switch {
+
+	case val.IsNull() || !val.IsKnown():
+		// Can't recurse into null or unknown values, regardless of type
+		newVal = val
+
+	case ty.IsListType() || ty.IsSetType() || ty.IsTupleType():
+		l := val.LengthInt()
+		switch l {
+		case 0:
+			// No deep transform for an empty sequence
+			newVal = val
+		default:
+			elems := make([]Value, 0, l)
+			for it := val.ElementIterator(); it.Next(); {
+				kv, ev := it.Element()
+				path := append(path, IndexStep{
+					Key: kv,
+				})
+				newEv, err := transform(path, ev, cb)
+				if err != nil {
+					return DynamicVal, err
+				}
+				elems = append(elems, newEv)
+			}
+			switch {
+			case ty.IsListType():
+				newVal = ListVal(elems)
+			case ty.IsSetType():
+				newVal = SetVal(elems)
+			case ty.IsTupleType():
+				newVal = TupleVal(elems)
+			default:
+				panic("unknown sequence type") // should never happen because of the case we are in
+			}
+		}
+
+	case ty.IsMapType():
+		l := val.LengthInt()
+		switch l {
+		case 0:
+			// No deep transform for an empty map
+			newVal = val
+		default:
+			elems := make(map[string]Value)
+			for it := val.ElementIterator(); it.Next(); {
+				kv, ev := it.Element()
+				path := append(path, IndexStep{
+					Key: kv,
+				})
+				newEv, err := transform(path, ev, cb)
+				if err != nil {
+					return DynamicVal, err
+				}
+				elems[kv.AsString()] = newEv
+			}
+			newVal = MapVal(elems)
+		}
+
+	case ty.IsObjectType():
+		switch {
+		case ty.Equals(EmptyObject):
+			// No deep transform for an empty object
+			newVal = val
+		default:
+			atys := ty.AttributeTypes()
+			newAVs := make(map[string]Value)
+			for name := range atys {
+				av := val.GetAttr(name)
+				path := append(path, GetAttrStep{
+					Name: name,
+				})
+				newAV, err := transform(path, av, cb)
+				if err != nil {
+					return DynamicVal, err
+				}
+				newAVs[name] = newAV
+			}
+			newVal = ObjectVal(newAVs)
+		}
+
+	default:
+		newVal = val
+	}
+
+	return cb(path, newVal)
+}
diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go
index 2641dadd64..213bf204af 100644
--- a/vendor/golang.org/x/crypto/blowfish/cipher.go
+++ b/vendor/golang.org/x/crypto/blowfish/cipher.go
@@ -3,6 +3,14 @@
 // license that can be found in the LICENSE file.
 
 // Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
+// +// Blowfish is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). package blowfish // import "golang.org/x/crypto/blowfish" // The code is a port of Bruce Schneier's C implementation. diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go index 0b4af37bdc..ddcbeb6f2a 100644 --- a/vendor/golang.org/x/crypto/cast5/cast5.go +++ b/vendor/golang.org/x/crypto/cast5/cast5.go @@ -2,8 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common -// OpenPGP cipher. +// Package cast5 implements CAST5, as defined in RFC 2144. +// +// CAST5 is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). package cast5 // import "golang.org/x/crypto/cast5" import "errors" diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index cb8fbc57b9..75f24babb6 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -86,7 +86,7 @@ func feFromBytes(dst *fieldElement, src *[32]byte) { h6 := load3(src[20:]) << 7 h7 := load3(src[23:]) << 5 h8 := load3(src[26:]) << 4 - h9 := load3(src[29:]) << 2 + h9 := (load3(src[29:]) & 0x7fffff) << 2 var carry [10]int64 carry[9] = (h9 + 1<<24) >> 25 diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s b/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s new file mode 100644 index 0000000000..b3a16ef751 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s @@ -0,0 +1,308 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.11 +// +build !gccgo,!appengine + +#include "textflag.h" + +#define NUM_ROUNDS 10 + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD dst+0(FP), R1 + MOVD src+24(FP), R2 + MOVD src_len+32(FP), R3 + MOVD key+48(FP), R4 + MOVD nonce+56(FP), R6 + MOVD counter+64(FP), R7 + + MOVD $·constants(SB), R10 + MOVD $·incRotMatrix(SB), R11 + + MOVW (R7), R20 + + AND $~255, R3, R13 + ADD R2, R13, R12 // R12 for block end + AND $255, R3, R13 +loop: + MOVD $NUM_ROUNDS, R21 + VLD1 (R11), [V30.S4, V31.S4] + + // load contants + // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] + WORD $0x4D60E940 + + // load keys + // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] + WORD $0x4DFFE884 + // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] + WORD $0x4DFFE888 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V12.S4] + WORD $0x4D40C8EC + + // VLD3R (R6), [V13.S4, V14.S4, V15.S4] + WORD $0x4D40E8CD + + // update counter + VADD V30.S4, V12.S4, V12.S4 + +chacha: + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) + VADD V8.S4, V12.S4, V8.S4 + VADD V9.S4, V13.S4, V9.S4 + VADD V10.S4, V14.S4, V10.S4 + VADD V11.S4, V15.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $12, V16.S4, V4.S4 + VSHL $12, V17.S4, V5.S4 + VSHL $12, V18.S4, V6.S4 + VSHL $12, V19.S4, V7.S4 + VSRI $20, V16.S4, V4.S4 + VSRI $20, V17.S4, V5.S4 + VSRI $20, V18.S4, V6.S4 + VSRI $20, V19.S4, V7.S4 + + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) + VADD V12.S4, V8.S4, V8.S4 + VADD V13.S4, V9.S4, V9.S4 + VADD V14.S4, V10.S4, V10.S4 + VADD V15.S4, V11.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $7, V16.S4, V4.S4 + VSHL $7, V17.S4, V5.S4 + VSHL $7, V18.S4, V6.S4 + VSHL $7, V19.S4, V7.S4 + VSRI $25, V16.S4, V4.S4 + VSRI $25, V17.S4, V5.S4 + VSRI $25, V18.S4, V6.S4 + VSRI $25, V19.S4, V7.S4 + + // V0..V3 += V5..V7, V4 + // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) + VADD V0.S4, V5.S4, V0.S4 + VADD V1.S4, V6.S4, V1.S4 + VADD V2.S4, V7.S4, V2.S4 + VADD V3.S4, V4.S4, V3.S4 + VEOR V15.B16, V0.B16, V15.B16 + VEOR V12.B16, V1.B16, V12.B16 + VEOR V13.B16, V2.B16, V13.B16 + VEOR V14.B16, V3.B16, V14.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 12) + // ... 
+ VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $12, V16.S4, V5.S4 + VSHL $12, V17.S4, V6.S4 + VSHL $12, V18.S4, V7.S4 + VSHL $12, V19.S4, V4.S4 + VSRI $20, V16.S4, V5.S4 + VSRI $20, V17.S4, V6.S4 + VSRI $20, V18.S4, V7.S4 + VSRI $20, V19.S4, V4.S4 + + // V0 += V5; V15 <<<= ((V0 XOR V15), 8) + // ... + VADD V5.S4, V0.S4, V0.S4 + VADD V6.S4, V1.S4, V1.S4 + VADD V7.S4, V2.S4, V2.S4 + VADD V4.S4, V3.S4, V3.S4 + VEOR V0.B16, V15.B16, V15.B16 + VEOR V1.B16, V12.B16, V12.B16 + VEOR V2.B16, V13.B16, V13.B16 + VEOR V3.B16, V14.B16, V14.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 7) + // ... + VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $7, V16.S4, V5.S4 + VSHL $7, V17.S4, V6.S4 + VSHL $7, V18.S4, V7.S4 + VSHL $7, V19.S4, V4.S4 + VSRI $25, V16.S4, V5.S4 + VSRI $25, V17.S4, V6.S4 + VSRI $25, V18.S4, V7.S4 + VSRI $25, V19.S4, V4.S4 + + SUB $1, R21 + CBNZ R21, chacha + + // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] + WORD $0x4D60E950 + + // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] + WORD $0x4DFFE894 + VADD V30.S4, V12.S4, V12.S4 + VADD V16.S4, V0.S4, V0.S4 + VADD V17.S4, V1.S4, V1.S4 + VADD V18.S4, V2.S4, V2.S4 + VADD V19.S4, V3.S4, V3.S4 + // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] + WORD $0x4DFFE898 + // restore R4 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V28.S4] + WORD $0x4D40C8FC + // VLD3R (R6), [V29.S4, V30.S4, V31.S4] + WORD $0x4D40E8DD + + VADD V20.S4, V4.S4, V4.S4 + VADD V21.S4, V5.S4, V5.S4 + VADD V22.S4, V6.S4, V6.S4 + VADD V23.S4, V7.S4, V7.S4 + VADD V24.S4, V8.S4, V8.S4 + VADD V25.S4, V9.S4, V9.S4 + VADD V26.S4, V10.S4, V10.S4 + VADD V27.S4, V11.S4, V11.S4 + VADD V28.S4, V12.S4, V12.S4 + VADD V29.S4, V13.S4, V13.S4 + VADD V30.S4, V14.S4, V14.S4 + VADD V31.S4, V15.S4, V15.S4 + + VZIP1 V1.S4, V0.S4, V16.S4 + VZIP2 V1.S4, V0.S4, V17.S4 + VZIP1 V3.S4, V2.S4, V18.S4 + VZIP2 V3.S4, V2.S4, V19.S4 + VZIP1 V5.S4, V4.S4, V20.S4 + VZIP2 V5.S4, V4.S4, V21.S4 + VZIP1 V7.S4, V6.S4, V22.S4 + VZIP2 V7.S4, V6.S4, V23.S4 + VZIP1 V9.S4, V8.S4, V24.S4 + VZIP2 V9.S4, V8.S4, V25.S4 + VZIP1 V11.S4, V10.S4, V26.S4 + VZIP2 V11.S4, V10.S4, V27.S4 + VZIP1 V13.S4, V12.S4, V28.S4 + VZIP2 V13.S4, V12.S4, V29.S4 + VZIP1 V15.S4, V14.S4, V30.S4 + VZIP2 V15.S4, V14.S4, V31.S4 + VZIP1 V18.D2, V16.D2, V0.D2 + VZIP2 V18.D2, V16.D2, V4.D2 + VZIP1 V19.D2, V17.D2, V8.D2 + VZIP2 V19.D2, V17.D2, V12.D2 + VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] + + VZIP1 V22.D2, V20.D2, V1.D2 + VZIP2 V22.D2, V20.D2, V5.D2 + VZIP1 V23.D2, V21.D2, V9.D2 + VZIP2 V23.D2, V21.D2, V13.D2 + VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] + VZIP1 V26.D2, V24.D2, V2.D2 + VZIP2 V26.D2, V24.D2, V6.D2 + VZIP1 V27.D2, V25.D2, V10.D2 + VZIP2 V27.D2, V25.D2, V14.D2 + VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] + VZIP1 V30.D2, V28.D2, V3.D2 + VZIP2 V30.D2, V28.D2, V7.D2 + VZIP1 V31.D2, V29.D2, V11.D2 + VZIP2 V31.D2, V29.D2, V15.D2 + VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] + VEOR V0.B16, V16.B16, V16.B16 + VEOR V1.B16, V17.B16, V17.B16 + VEOR V2.B16, 
V18.B16, V18.B16 + VEOR V3.B16, V19.B16, V19.B16 + VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) + VEOR V4.B16, V20.B16, V20.B16 + VEOR V5.B16, V21.B16, V21.B16 + VEOR V6.B16, V22.B16, V22.B16 + VEOR V7.B16, V23.B16, V23.B16 + VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) + VEOR V8.B16, V24.B16, V24.B16 + VEOR V9.B16, V25.B16, V25.B16 + VEOR V10.B16, V26.B16, V26.B16 + VEOR V11.B16, V27.B16, V27.B16 + VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) + VEOR V12.B16, V28.B16, V28.B16 + VEOR V13.B16, V29.B16, V29.B16 + VEOR V14.B16, V30.B16, V30.B16 + VEOR V15.B16, V31.B16, V31.B16 + VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) + + ADD $4, R20 + MOVW R20, (R7) // update counter + + CMP R2, R12 + BGT loop + + RET + + +DATA ·constants+0x00(SB)/4, $0x61707865 +DATA ·constants+0x04(SB)/4, $0x3320646e +DATA ·constants+0x08(SB)/4, $0x79622d32 +DATA ·constants+0x0c(SB)/4, $0x6b206574 +GLOBL ·constants(SB), NOPTR|RODATA, $32 + +DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 +DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 +DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 +DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 +DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 +DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 +DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B +DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F +GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go new file mode 100644 index 0000000000..ad74e23aef --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 +// +build !gccgo + +package chacha20 + +const ( + haveAsm = true + bufSize = 256 +) + +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { + + if len(src) >= bufSize { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) + } + + if len(src)%bufSize != 0 { + i := len(src) - len(src)%bufSize + c.buf = [bufSize]byte{} + copy(c.buf[:], src[i:]) + xorKeyStreamVX(c.buf[:], c.buf[:], &c.key, &c.nonce, &c.counter) + c.len = bufSize - copy(dst[i:], c.buf[:len(src)%bufSize]) + } +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go index 91520d1de0..47eac0314c 100644 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !s390x gccgo appengine +// +build !arm64,!s390x arm64,!go1.11 gccgo appengine package chacha20 diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go index 0c1c671c40..aad645b447 100644 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go @@ -6,14 +6,13 @@ package chacha20 -var haveAsm = hasVectorFacility() +import ( + "golang.org/x/sys/cpu" +) -const bufSize = 256 +var haveAsm = cpu.S390X.HasVX -// hasVectorFacility reports whether the machine supports the vector -// facility (vx). -// Implementation in asm_s390x.s. 
-func hasVectorFacility() bool +const bufSize = 256 // xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only // be called when the vector facility is available. diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s index 98427c5e22..57df404465 100644 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s @@ -258,26 +258,3 @@ tail: MOVD R8, R3 MOVD $0, R4 JMP continue - -// func hasVectorFacility() bool -TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1 - MOVD $x-24(SP), R1 - XC $24, 0(R1), 0(R1) // clear the storage - MOVD $2, R0 // R0 is the number of double words stored -1 - WORD $0xB2B01000 // STFLE 0(R1) - XOR R0, R0 // reset the value of R0 - MOVBZ z-8(SP), R1 - AND $0x40, R1 - BEQ novector - -vectorinstalled: - // check if the vector instruction has been enabled - VLEIB $0, $0xF, V16 - VLGVB $0, V16, R1 - CMPBNE R1, $0xF, novector - MOVB $1, ret+0(FP) // have vx - RET - -novector: - MOVB $0, ret+0(FP) // no vx - RET diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go index d6dede74e9..4ee71784eb 100644 --- a/vendor/golang.org/x/crypto/openpgp/write.go +++ b/vendor/golang.org/x/crypto/openpgp/write.go @@ -271,6 +271,7 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint // These are the possible hash functions that we'll use for the signature. candidateHashes := []uint8{ hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), hashToHashId(crypto.SHA512), hashToHashId(crypto.SHA1), hashToHashId(crypto.RIPEMD160), @@ -349,6 +350,7 @@ func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Con // These are the possible hash functions that we'll use for the signature. candidateHashes := []uint8{ hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), hashToHashId(crypto.SHA512), hashToHashId(crypto.SHA1), hashToHashId(crypto.RIPEMD160), diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go index eff9ad3a98..55f7691d48 100644 --- a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -7,6 +7,9 @@ // This implementation is distilled from https://tools.ietf.org/html/rfc7292 // and referenced documents. It is intended for decoding P12/PFX-stored // certificates and keys for use with the crypto/tls package. +// +// This package is frozen. If it's missing functionality you need, consider +// an alternative like software.sslmate.com/src/go-pkcs12. package pkcs12 import ( @@ -100,7 +103,7 @@ func unmarshal(in []byte, out interface{}) error { return nil } -// ConvertToPEM converts all "safe bags" contained in pfxData to PEM blocks. +// ToPEM converts all "safe bags" contained in pfxData to PEM blocks. func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { encodedPassword, err := bmpString(password) if err != nil { @@ -208,7 +211,7 @@ func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) // Decode extracts a certificate and private key from pfxData. This function // assumes that there is only one certificate and only one private key in the -// pfxData. +// pfxData; if there are more use ToPEM instead. 
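+//
+// A typical call looks like this (an illustrative sketch, where pfxData
+// holds the raw bytes of a .p12/.pfx archive):
+//
+//	priv, cert, err := pkcs12.Decode(pfxData, "password")
+//	if err != nil {
+//		// Decode fails for archives with more than one entry;
+//		// use ToPEM for those.
+//	}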
 func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
 	encodedPassword, err := bmpString(password)
 	if err != nil {
diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
new file mode 100644
index 0000000000..8387d29998
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 gccgo appengine
+
+package poly1305
+
+type mac struct{ macGeneric }
+
+func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} }
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go
index f562fa5712..d076a56235 100644
--- a/vendor/golang.org/x/crypto/poly1305/poly1305.go
+++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go
@@ -2,21 +2,19 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-/*
-Package poly1305 implements Poly1305 one-time message authentication code as
-specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
-
-Poly1305 is a fast, one-time authentication function. It is infeasible for an
-attacker to generate an authenticator for a message without the key. However, a
-key must only be used for a single message. Authenticating two different
-messages with the same key allows an attacker to forge authenticators for other
-messages with the same key.
-
-Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
-used with a fixed key in order to generate one-time keys from an nonce.
-However, in this package AES isn't used and the one-time key is specified
-directly.
-*/
+// Package poly1305 implements Poly1305 one-time message authentication code as
+// specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
+//
+// Poly1305 is a fast, one-time authentication function. It is infeasible for an
+// attacker to generate an authenticator for a message without the key. However, a
+// key must only be used for a single message. Authenticating two different
+// messages with the same key allows an attacker to forge authenticators for other
+// messages with the same key.
+//
+// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
+// used with a fixed key in order to generate one-time keys from a nonce.
+// However, in this package AES isn't used and the one-time key is specified
+// directly.
 package poly1305 // import "golang.org/x/crypto/poly1305"
 
 import "crypto/subtle"
@@ -31,3 +29,55 @@ func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
 	Sum(&tmp, m, key)
 	return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
 }
+
+// New returns a new MAC computing an authentication
+// tag of all data written to it with the given key.
+// This allows writing the message progressively instead
+// of passing it as a single slice. Common users should use
+// the Sum function instead.
+//
+// The key must be unique for each message, as authenticating
+// two different messages with the same key allows an attacker
+// to forge messages at will.
+func New(key *[32]byte) *MAC {
+	return &MAC{
+		mac:       newMAC(key),
+		finalized: false,
+	}
+}
+
+// MAC is an io.Writer computing an authentication tag
+// of the data written to it.
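+//
+// A streaming use looks like the following (an illustrative sketch; key is
+// assumed to be a fresh one-time *[32]byte that is never reused):
+//
+//	h := poly1305.New(&key)
+//	h.Write(firstChunk)
+//	h.Write(secondChunk)
+//	tag := h.Sum(nil) // appends the 16-byte authenticator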
+// +// MAC cannot be used like common hash.Hash implementations, +// because using a poly1305 key twice breaks its security. +// Therefore writing data to a running MAC after calling +// Sum causes it to panic. +type MAC struct { + mac // platform-dependent implementation + + finalized bool +} + +// Size returns the number of bytes Sum will return. +func (h *MAC) Size() int { return TagSize } + +// Write adds more data to the running message authentication code. +// It never returns an error. +// +// It must not be called after the first call of Sum. +func (h *MAC) Write(p []byte) (n int, err error) { + if h.finalized { + panic("poly1305: write to MAC after Sum") + } + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return append(b, mac[:]...) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go index 4dd72fe799..2dbf42aa53 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -6,17 +6,63 @@ package poly1305 -// This function is implemented in sum_amd64.s //go:noescape -func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte) +func initialize(state *[7]uint64, key *[32]byte) + +//go:noescape +func update(state *[7]uint64, msg []byte) + +//go:noescape +func finalize(tag *[TagSize]byte, state *[7]uint64) // Sum generates an authenticator for m using a one-time key and puts the // 16-byte result into out. Authenticating two different messages with the same // key allows an attacker to forge messages at will. func Sum(out *[16]byte, m []byte, key *[32]byte) { - var mPtr *byte - if len(m) > 0 { - mPtr = &m[0] + h := newMAC(key) + h.Write(m) + h.Sum(out) +} + +func newMAC(key *[32]byte) (h mac) { + initialize(&h.state, key) + return +} + +type mac struct { + state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 } + + buffer [TagSize]byte + offset int +} + +func (h *mac) Write(p []byte) (n int, err error) { + n = len(p) + if h.offset > 0 { + remaining := TagSize - h.offset + if n < remaining { + h.offset += copy(h.buffer[h.offset:], p) + return n, nil + } + copy(h.buffer[h.offset:], p[:remaining]) + p = p[remaining:] + h.offset = 0 + update(&h.state, h.buffer[:]) + } + if nn := len(p) - (len(p) % TagSize); nn > 0 { + update(&h.state, p[:nn]) + p = p[nn:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return n, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.state + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) } - poly1305(out, mPtr, uint64(len(m)), key) + finalize(out, &state) } diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s index 2edae63828..7d600f13cc 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -58,20 +58,17 @@ DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC GLOBL ·poly1305Mask<>(SB), RODATA, $16 -// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key) -TEXT ·poly1305(SB), $0-32 - MOVQ out+0(FP), DI - MOVQ m+8(FP), SI - MOVQ mlen+16(FP), R15 - MOVQ key+24(FP), AX - - MOVQ 0(AX), R11 - MOVQ 8(AX), R12 - ANDQ ·poly1305Mask<>(SB), R11 // r0 - ANDQ ·poly1305Mask<>+8(SB), R12 // r1 - XORQ R8, R8 // h0 - XORQ R9, R9 // h1 - XORQ R10, 
R10 // h2 +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVQ state+0(FP), DI + MOVQ msg_base+8(FP), SI + MOVQ msg_len+16(FP), R15 + + MOVQ 0(DI), R8 // h0 + MOVQ 8(DI), R9 // h1 + MOVQ 16(DI), R10 // h2 + MOVQ 24(DI), R11 // r0 + MOVQ 32(DI), R12 // r1 CMPQ R15, $16 JB bytes_between_0_and_15 @@ -109,16 +106,42 @@ flush_buffer: JMP multiply done: - MOVQ R8, AX - MOVQ R9, BX + MOVQ R8, 0(DI) + MOVQ R9, 8(DI) + MOVQ R10, 16(DI) + RET + +// func initialize(state *[7]uint64, key *[32]byte) +TEXT ·initialize(SB), $0-16 + MOVQ state+0(FP), DI + MOVQ key+8(FP), SI + + // state[0...7] is initialized with zero + MOVOU 0(SI), X0 + MOVOU 16(SI), X1 + MOVOU ·poly1305Mask<>(SB), X2 + PAND X2, X0 + MOVOU X0, 24(DI) + MOVOU X1, 40(DI) + RET + +// func finalize(tag *[TagSize]byte, state *[7]uint64) +TEXT ·finalize(SB), $0-16 + MOVQ tag+0(FP), DI + MOVQ state+8(FP), SI + + MOVQ 0(SI), AX + MOVQ 8(SI), BX + MOVQ 16(SI), CX + MOVQ AX, R8 + MOVQ BX, R9 SUBQ $0xFFFFFFFFFFFFFFFB, AX SBBQ $0xFFFFFFFFFFFFFFFF, BX - SBBQ $3, R10 + SBBQ $3, CX CMOVQCS R8, AX CMOVQCS R9, BX - MOVQ key+24(FP), R8 - ADDQ 16(R8), AX - ADCQ 24(R8), BX + ADDQ 40(SI), AX + ADCQ 48(SI), BX MOVQ AX, 0(DI) MOVQ BX, 8(DI) diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ref.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go similarity index 54% rename from vendor/golang.org/x/crypto/poly1305/sum_ref.go rename to vendor/golang.org/x/crypto/poly1305/sum_generic.go index c4d59bd098..bab76ef0d8 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_ref.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,21 +6,79 @@ package poly1305 import "encoding/binary" +const ( + msgBlock = uint32(1 << 24) + finalBlock = uint32(0) +) + // sumGeneric generates an authenticator for msg using a one-time key and // puts the 16-byte result into out. This is the generic implementation of // Sum and should be called if no assembly implementation is available. 
func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { - var ( - h0, h1, h2, h3, h4 uint32 // the hash accumulators - r0, r1, r2, r3, r4 uint64 // the r part of the key - ) + h := newMACGeneric(key) + h.Write(msg) + h.Sum(out) +} + +func newMACGeneric(key *[32]byte) (h macGeneric) { + h.r[0] = binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff + h.r[1] = (binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03 + h.r[2] = (binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff + h.r[3] = (binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff + h.r[4] = (binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff + + h.s[0] = binary.LittleEndian.Uint32(key[16:]) + h.s[1] = binary.LittleEndian.Uint32(key[20:]) + h.s[2] = binary.LittleEndian.Uint32(key[24:]) + h.s[3] = binary.LittleEndian.Uint32(key[28:]) + return +} + +type macGeneric struct { + h, r [5]uint32 + s [4]uint32 + + buffer [TagSize]byte + offset int +} + +func (h *macGeneric) Write(p []byte) (n int, err error) { + n = len(p) + if h.offset > 0 { + remaining := TagSize - h.offset + if n < remaining { + h.offset += copy(h.buffer[h.offset:], p) + return n, nil + } + copy(h.buffer[h.offset:], p[:remaining]) + p = p[remaining:] + h.offset = 0 + updateGeneric(h.buffer[:], msgBlock, &(h.h), &(h.r)) + } + if nn := len(p) - (len(p) % TagSize); nn > 0 { + updateGeneric(p, msgBlock, &(h.h), &(h.r)) + p = p[nn:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return n, nil +} - r0 = uint64(binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff) - r1 = uint64((binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03) - r2 = uint64((binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff) - r3 = uint64((binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff) - r4 = uint64((binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff) +func (h *macGeneric) Sum(out *[16]byte) { + H, R := h.h, h.r + if h.offset > 0 { + var buffer [TagSize]byte + copy(buffer[:], h.buffer[:h.offset]) + buffer[h.offset] = 1 // invariant: h.offset < TagSize + updateGeneric(buffer[:], finalBlock, &H, &R) + } + finalizeGeneric(out, &H, &(h.s)) +} +func updateGeneric(msg []byte, flag uint32, h, r *[5]uint32) { + h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4] + r0, r1, r2, r3, r4 := uint64(r[0]), uint64(r[1]), uint64(r[2]), uint64(r[3]), uint64(r[4]) R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5 for len(msg) >= TagSize { @@ -29,7 +87,7 @@ func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff - h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | (1 << 24) + h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | flag // h *= r d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) @@ -52,36 +110,11 @@ func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { msg = msg[TagSize:] } - if len(msg) > 0 { - var block [TagSize]byte - off := copy(block[:], msg) - block[off] = 0x01 - - // h += msg - h0 += binary.LittleEndian.Uint32(block[0:]) & 0x3ffffff - h1 += (binary.LittleEndian.Uint32(block[3:]) >> 2) & 0x3ffffff - h2 += (binary.LittleEndian.Uint32(block[6:]) >> 4) & 0x3ffffff - h3 += (binary.LittleEndian.Uint32(block[9:]) >> 6) & 0x3ffffff - h4 += (binary.LittleEndian.Uint32(block[12:]) >> 8) - - // h *= r - d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) - d1 := (d0 >> 26) + 
(uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) - d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) - d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) - d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) - - // h %= p - h0 = uint32(d0) & 0x3ffffff - h1 = uint32(d1) & 0x3ffffff - h2 = uint32(d2) & 0x3ffffff - h3 = uint32(d3) & 0x3ffffff - h4 = uint32(d4) & 0x3ffffff + h[0], h[1], h[2], h[3], h[4] = h0, h1, h2, h3, h4 +} - h0 += uint32(d4>>26) * 5 - h1 += h0 >> 26 - h0 = h0 & 0x3ffffff - } +func finalizeGeneric(out *[TagSize]byte, h *[5]uint32, s *[4]uint32) { + h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4] // h %= p reduction h2 += h1 >> 26 @@ -123,13 +156,13 @@ func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { // s: the s part of the key // tag = (h + s) % (2^128) - t := uint64(h0) + uint64(binary.LittleEndian.Uint32(key[16:])) + t := uint64(h0) + uint64(s[0]) h0 = uint32(t) - t = uint64(h1) + uint64(binary.LittleEndian.Uint32(key[20:])) + (t >> 32) + t = uint64(h1) + uint64(s[1]) + (t >> 32) h1 = uint32(t) - t = uint64(h2) + uint64(binary.LittleEndian.Uint32(key[24:])) + (t >> 32) + t = uint64(h2) + uint64(s[2]) + (t >> 32) h2 = uint32(t) - t = uint64(h3) + uint64(binary.LittleEndian.Uint32(key[28:])) + (t >> 32) + t = uint64(h3) + uint64(s[3]) + (t >> 32) h3 = uint32(t) binary.LittleEndian.PutUint32(out[0:], h0) diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go index 751eec5274..fcdef46ab6 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go @@ -10,5 +10,7 @@ package poly1305 // 16-byte result into out. Authenticating two different messages with the same // key allows an attacker to forge messages at will. func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) { - sumGeneric(out, msg, key) + h := newMAC(key) + h.Write(msg) + h.Sum(out) } diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go index 7a266cece4..ec99e07e9f 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -6,16 +6,9 @@ package poly1305 -// hasVectorFacility reports whether the machine supports -// the vector facility (vx). -func hasVectorFacility() bool - -// hasVMSLFacility reports whether the machine supports -// Vector Multiply Sum Logical (VMSL). -func hasVMSLFacility() bool - -var hasVX = hasVectorFacility() -var hasVMSL = hasVMSLFacility() +import ( + "golang.org/x/sys/cpu" +) // poly1305vx is an assembly implementation of Poly1305 that uses vector // instructions. It must only be called if the vector facility (vx) is @@ -33,12 +26,12 @@ func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte) // 16-byte result into out. Authenticating two different messages with the same // key allows an attacker to forge messages at will. 
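
Both Write implementations introduced above, mac.Write for amd64 and macGeneric.Write, share one buffering shape: top up a partial block, stream whole 16-byte blocks straight through, and hold the tail for later. A standalone sketch of just that pattern; blockWriter is a hypothetical name, not part of the package:

	package main

	import "fmt"

	// blockWriter mirrors the buffer/offset bookkeeping of the Write
	// methods above: complete 16-byte blocks go to process, any tail
	// waits in buffer until more data (or finalization) arrives.
	type blockWriter struct {
		process func(block []byte) // receives a whole number of 16-byte blocks
		buffer  [16]byte
		offset  int
	}

	func (w *blockWriter) Write(p []byte) (int, error) {
		n := len(p)
		if w.offset > 0 {
			remaining := 16 - w.offset
			if len(p) < remaining {
				w.offset += copy(w.buffer[w.offset:], p)
				return n, nil // still no complete block
			}
			copy(w.buffer[w.offset:], p[:remaining])
			p = p[remaining:]
			w.offset = 0
			w.process(w.buffer[:])
		}
		if nn := len(p) - len(p)%16; nn > 0 {
			w.process(p[:nn]) // bulk path: no copying
			p = p[nn:]
		}
		if len(p) > 0 {
			w.offset += copy(w.buffer[w.offset:], p)
		}
		return n, nil
	}

	func main() {
		blocks := 0
		w := &blockWriter{process: func(b []byte) { blocks += len(b) / 16 }}
		w.Write(make([]byte, 5))      // buffered, no block yet
		w.Write(make([]byte, 30))     // completes one block, streams one more
		fmt.Println(blocks, w.offset) // 2 3
	}
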
func Sum(out *[16]byte, m []byte, key *[32]byte) { - if hasVX { + if cpu.S390X.HasVX { var mPtr *byte if len(m) > 0 { mPtr = &m[0] } - if hasVMSL && len(m) > 256 { + if cpu.S390X.HasVXE && len(m) > 256 { poly1305vmsl(out, mPtr, uint64(len(m)), key) } else { poly1305vx(out, mPtr, uint64(len(m)), key) diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s index 356c07a6c2..ca5a309d86 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -376,25 +376,3 @@ b1: MOVD $0, R3 BR multiply - -TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1 - MOVD $x-24(SP), R1 - XC $24, 0(R1), 0(R1) // clear the storage - MOVD $2, R0 // R0 is the number of double words stored -1 - WORD $0xB2B01000 // STFLE 0(R1) - XOR R0, R0 // reset the value of R0 - MOVBZ z-8(SP), R1 - AND $0x40, R1 - BEQ novector - -vectorinstalled: - // check if the vector instruction has been enabled - VLEIB $0, $0xF, V16 - VLGVB $0, V16, R1 - CMPBNE R1, $0xF, novector - MOVB $1, ret+0(FP) // have vx - RET - -novector: - MOVB $0, ret+0(FP) // no vx - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s index e548020b14..e60bbc1d7f 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s @@ -907,25 +907,3 @@ square: MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) BR next - -TEXT ·hasVMSLFacility(SB), NOSPLIT, $24-1 - MOVD $x-24(SP), R1 - XC $24, 0(R1), 0(R1) // clear the storage - MOVD $2, R0 // R0 is the number of double words stored -1 - WORD $0xB2B01000 // STFLE 0(R1) - XOR R0, R0 // reset the value of R0 - MOVBZ z-8(SP), R1 - AND $0x01, R1 - BEQ novmsl - -vectorinstalled: - // check if the vector instruction has been enabled - VLEIB $0, $0xF, V16 - VLGVB $0, V16, R1 - CMPBNE R1, $0xF, novmsl - MOVB $1, ret+0(FP) // have vx - RET - -novmsl: - MOVB $0, ret+0(FP) // no vx - RET diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go deleted file mode 100644 index cd0a8ac154..0000000000 --- a/vendor/golang.org/x/net/html/atom/atom.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package atom provides integer codes (also known as atoms) for a fixed set of -// frequently occurring HTML strings: tag names and attribute keys such as "p" -// and "id". -// -// Sharing an atom's name between all elements with the same tag can result in -// fewer string allocations when tokenizing and parsing HTML. Integer -// comparisons are also generally faster than string comparisons. -// -// The value of an atom's particular code is not guaranteed to stay the same -// between versions of this package. Neither is any ordering guaranteed: -// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to -// be dense. The only guarantees are that e.g. looking up "div" will yield -// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. -package atom // import "golang.org/x/net/html/atom" - -// Atom is an integer code for a string. The zero value maps to "". -type Atom uint32 - -// String returns the atom's name. 
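
For reference while reviewing the removal: the guarantees listed in the atom package doc above are easy to exercise against the pre-removal API. A small sketch:

	package main

	import (
		"fmt"

		"golang.org/x/net/html/atom"
	)

	func main() {
		a := atom.Lookup([]byte("div"))
		fmt.Println(a == atom.Div) // true: lookup yields the constant
		fmt.Println(a.String())    // "div": String round-trips the name
		fmt.Println(a != 0)        // true: only unknown names map to 0
	}
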
-func (a Atom) String() string { - start := uint32(a >> 8) - n := uint32(a & 0xff) - if start+n > uint32(len(atomText)) { - return "" - } - return atomText[start : start+n] -} - -func (a Atom) string() string { - return atomText[a>>8 : a>>8+a&0xff] -} - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s []byte) uint32 { - for i := range s { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -func match(s string, t []byte) bool { - for i, c := range t { - if s[i] != c { - return false - } - } - return true -} - -// Lookup returns the atom whose name is s. It returns zero if there is no -// such atom. The lookup is case sensitive. -func Lookup(s []byte) Atom { - if len(s) == 0 || len(s) > maxAtomLen { - return 0 - } - h := fnv(hash0, s) - if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - return 0 -} - -// String returns a string whose contents are equal to s. In that sense, it is -// equivalent to string(s) but may be more efficient. -func String(s []byte) string { - if a := Lookup(s); a != 0 { - return a.String() - } - return string(s) -} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go deleted file mode 100644 index 5d052781bc..0000000000 --- a/vendor/golang.org/x/net/html/atom/gen.go +++ /dev/null @@ -1,712 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go -//go:generate go run gen.go -test - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io/ioutil" - "math/rand" - "os" - "sort" - "strings" -) - -// identifier converts s to a Go exported identifier. -// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". -func identifier(s string) string { - b := make([]byte, 0, len(s)) - cap := true - for _, c := range s { - if c == '-' { - cap = true - continue - } - if cap && 'a' <= c && c <= 'z' { - c -= 'a' - 'A' - } - cap = false - b = append(b, byte(c)) - } - return string(b) -} - -var test = flag.Bool("test", false, "generate table_test.go") - -func genFile(name string, buf *bytes.Buffer) { - b, err := format.Source(buf.Bytes()) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := ioutil.WriteFile(name, b, 0644); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main() { - flag.Parse() - - var all []string - all = append(all, elements...) - all = append(all, attributes...) - all = append(all, eventHandlers...) - all = append(all, extra...) - sort.Strings(all) - - // uniq - lists have dups - w := 0 - for _, s := range all { - if w == 0 || all[w-1] != s { - all[w] = s - w++ - } - } - all = all[:w] - - if *test { - var buf bytes.Buffer - fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") - fmt.Fprintln(&buf, "package atom\n") - fmt.Fprintln(&buf, "var testAtomList = []string{") - for _, s := range all { - fmt.Fprintf(&buf, "\t%q,\n", s) - } - fmt.Fprintln(&buf, "}") - - genFile("table_test.go", &buf) - return - } - - // Find hash that minimizes table size. 
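
String and Lookup above both lean on the packed representation: an Atom is offset<<8 | length into atomText. A self-contained illustration using the Div code (0x16b03) from the generated table further below:

	package main

	import "fmt"

	// decode splits a packed atom code into (offset, length), exactly
	// the arithmetic String performs against atomText.
	func decode(a uint32) (start, n uint32) {
		return a >> 8, a & 0xff
	}

	func main() {
		start, n := decode(0x16b03) // Div in the generated table
		fmt.Println(start, n)       // 363 3: three bytes at offset 0x16b
	}
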
- var best *table - for i := 0; i < 1000000; i++ { - if best != nil && 1<<(best.k-1) < len(all) { - break - } - h := rand.Uint32() - for k := uint(0); k <= 16; k++ { - if best != nil && k >= best.k { - break - } - var t table - if t.init(h, k, all) { - best = &t - break - } - } - } - if best == nil { - fmt.Fprintf(os.Stderr, "failed to construct string table\n") - os.Exit(1) - } - - // Lay out strings, using overlaps when possible. - layout := append([]string{}, all...) - - // Remove strings that are substrings of other strings - for changed := true; changed; { - changed = false - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i != j && t != "" && strings.Contains(s, t) { - changed = true - layout[j] = "" - } - } - } - } - - // Join strings where one suffix matches another prefix. - for { - // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], - // maximizing overlap length k. - besti := -1 - bestj := -1 - bestk := 0 - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i == j { - continue - } - for k := bestk + 1; k <= len(s) && k <= len(t); k++ { - if s[len(s)-k:] == t[:k] { - besti = i - bestj = j - bestk = k - } - } - } - } - if bestk > 0 { - layout[besti] += layout[bestj][bestk:] - layout[bestj] = "" - continue - } - break - } - - text := strings.Join(layout, "") - - atom := map[string]uint32{} - for _, s := range all { - off := strings.Index(text, s) - if off < 0 { - panic("lost string " + s) - } - atom[s] = uint32(off<<8 | len(s)) - } - - var buf bytes.Buffer - // Generate the Go code. - fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go\n") - fmt.Fprintln(&buf, "package atom\n\nconst (") - - // compute max len - maxLen := 0 - for _, s := range all { - if maxLen < len(s) { - maxLen = len(s) - } - fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) - } - fmt.Fprintln(&buf, ")\n") - - fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) - fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) - - fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) - for i, s := range best.tab { - if s == "" { - continue - } - fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) - } - fmt.Fprintf(&buf, "}\n") - datasize := (1 << best.k) * 4 - - fmt.Fprintln(&buf, "const atomText =") - textsize := len(text) - for len(text) > 60 { - fmt.Fprintf(&buf, "\t%q +\n", text[:60]) - text = text[60:] - } - fmt.Fprintf(&buf, "\t%q\n\n", text) - - genFile("table.go", &buf) - - fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) -} - -type byLen []string - -func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } -func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x byLen) Len() int { return len(x) } - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s string) uint32 { - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// A table represents an attempt at constructing the lookup table. -// The lookup table uses cuckoo hashing, meaning that each string -// can be found in one of two positions. -type table struct { - h0 uint32 - k uint - mask uint32 - tab []string -} - -// hash returns the two hashes for s. 
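
The layout pass in gen.go merges names whose suffix overlaps another name's prefix, which is why the generated atomText below reads as one run-together string. A toy version of the overlap measure that the join loop maximizes; overlap is my helper name:

	package main

	import "fmt"

	// overlap reports the longest k such that a's k-byte suffix equals
	// b's k-byte prefix, the savings the join loop above looks for.
	func overlap(a, b string) int {
		best := 0
		for k := 1; k <= len(a) && k <= len(b); k++ {
			if a[len(a)-k:] == b[:k] {
				best = k
			}
		}
		return best
	}

	func main() {
		fmt.Println(overlap("accept", "ptgroup")) // 2: the two can share "pt"
		fmt.Println(overlap("div", "form"))       // 0: no savings
	}
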
-func (t *table) hash(s string) (h1, h2 uint32) { - h := fnv(t.h0, s) - h1 = h & t.mask - h2 = (h >> 16) & t.mask - return -} - -// init initializes the table with the given parameters. -// h0 is the initial hash value, -// k is the number of bits of hash value to use, and -// x is the list of strings to store in the table. -// init returns false if the table cannot be constructed. -func (t *table) init(h0 uint32, k uint, x []string) bool { - t.h0 = h0 - t.k = k - t.tab = make([]string, 1<<k) - t.mask = 1<<k - 1 - for _, s := range x { - if !t.insert(s) { - return false - } - } - return true -} - -// insert inserts s in the table. -func (t *table) insert(s string) bool { - h1, h2 := t.hash(s) - if t.tab[h1] == "" { - t.tab[h1] = s - return true - } - if t.tab[h2] == "" { - t.tab[h2] = s - return true - } - if t.push(h1, 0) { - t.tab[h1] = s - return true - } - if t.push(h2, 0) { - t.tab[h2] = s - return true - } - return false -} - -// push attempts to push aside the entry in slot i. -func (t *table) push(i uint32, depth int) bool { - if depth > len(t.tab) { - return false - } - s := t.tab[i] - h1, h2 := t.hash(s) - j := h1 + h2 - i - if t.tab[j] != "" && !t.push(j, depth+1) { - return false - } - t.tab[j] = s - return true -} - -// The lists of element names and attribute keys were taken from -// https://html.spec.whatwg.org/multipage/indices.html#index -// as of the "HTML Living Standard - Last Updated 16 April 2018" version. - -// "command", "keygen" and "menuitem" have been removed from the spec, -// but are kept here for backwards compatibility. -var elements = []string{ - "a", - "abbr", - "address", - "area", - "article", - "aside", - "audio", - "b", - "base", - "bdi", - "bdo", - "blockquote", - "body", - "br", - "button", - "canvas", - "caption", - "cite", - "code", - "col", - "colgroup", - "command", - "data", - "datalist", - "dd", - "del", - "details", - "dfn", - "dialog", - "div", - "dl", - "dt", - "em", - "embed", - "fieldset", - "figcaption", - "figure", - "footer", - "form", - "h1", - "h2", - "h3", - "h4", - "h5", - "h6", - "head", - "header", - "hgroup", - "hr", - "html", - "i", - "iframe", - "img", - "input", - "ins", - "kbd", - "keygen", - "label", - "legend", - "li", - "link", - "main", - "map", - "mark", - "menu", - "menuitem", - "meta", - "meter", - "nav", - "noscript", - "object", - "ol", - "optgroup", - "option", - "output", - "p", - "param", - "picture", - "pre", - "progress", - "q", - "rp", - "rt", - "ruby", - "s", - "samp", - "script", - "section", - "select", - "slot", - "small", - "source", - "span", - "strong", - "style", - "sub", - "summary", - "sup", - "table", - "tbody", - "td", - "template", - "textarea", - "tfoot", - "th", - "thead", - "time", - "title", - "tr", - "track", - "u", - "ul", - "var", - "video", - "wbr", -} - -// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 -// -// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", -// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, -// but are kept here for backwards compatibility.
-var attributes = []string{ - "abbr", - "accept", - "accept-charset", - "accesskey", - "action", - "allowfullscreen", - "allowpaymentrequest", - "allowusermedia", - "alt", - "as", - "async", - "autocomplete", - "autofocus", - "autoplay", - "challenge", - "charset", - "checked", - "cite", - "class", - "color", - "cols", - "colspan", - "command", - "content", - "contenteditable", - "contextmenu", - "controls", - "coords", - "crossorigin", - "data", - "datetime", - "default", - "defer", - "dir", - "dirname", - "disabled", - "download", - "draggable", - "dropzone", - "enctype", - "for", - "form", - "formaction", - "formenctype", - "formmethod", - "formnovalidate", - "formtarget", - "headers", - "height", - "hidden", - "high", - "href", - "hreflang", - "http-equiv", - "icon", - "id", - "inputmode", - "integrity", - "is", - "ismap", - "itemid", - "itemprop", - "itemref", - "itemscope", - "itemtype", - "keytype", - "kind", - "label", - "lang", - "list", - "loop", - "low", - "manifest", - "max", - "maxlength", - "media", - "mediagroup", - "method", - "min", - "minlength", - "multiple", - "muted", - "name", - "nomodule", - "nonce", - "novalidate", - "open", - "optimum", - "pattern", - "ping", - "placeholder", - "playsinline", - "poster", - "preload", - "radiogroup", - "readonly", - "referrerpolicy", - "rel", - "required", - "reversed", - "rows", - "rowspan", - "sandbox", - "spellcheck", - "scope", - "scoped", - "seamless", - "selected", - "shape", - "size", - "sizes", - "sortable", - "sorted", - "slot", - "span", - "spellcheck", - "src", - "srcdoc", - "srclang", - "srcset", - "start", - "step", - "style", - "tabindex", - "target", - "title", - "translate", - "type", - "typemustmatch", - "updateviacache", - "usemap", - "value", - "width", - "workertype", - "wrap", -} - -// "onautocomplete", "onautocompleteerror", "onmousewheel", -// "onshow" and "onsort" have been removed from the spec, -// but are kept here for backwards compatibility. -var eventHandlers = []string{ - "onabort", - "onautocomplete", - "onautocompleteerror", - "onauxclick", - "onafterprint", - "onbeforeprint", - "onbeforeunload", - "onblur", - "oncancel", - "oncanplay", - "oncanplaythrough", - "onchange", - "onclick", - "onclose", - "oncontextmenu", - "oncopy", - "oncuechange", - "oncut", - "ondblclick", - "ondrag", - "ondragend", - "ondragenter", - "ondragexit", - "ondragleave", - "ondragover", - "ondragstart", - "ondrop", - "ondurationchange", - "onemptied", - "onended", - "onerror", - "onfocus", - "onhashchange", - "oninput", - "oninvalid", - "onkeydown", - "onkeypress", - "onkeyup", - "onlanguagechange", - "onload", - "onloadeddata", - "onloadedmetadata", - "onloadend", - "onloadstart", - "onmessage", - "onmessageerror", - "onmousedown", - "onmouseenter", - "onmouseleave", - "onmousemove", - "onmouseout", - "onmouseover", - "onmouseup", - "onmousewheel", - "onwheel", - "onoffline", - "ononline", - "onpagehide", - "onpageshow", - "onpaste", - "onpause", - "onplay", - "onplaying", - "onpopstate", - "onprogress", - "onratechange", - "onreset", - "onresize", - "onrejectionhandled", - "onscroll", - "onsecuritypolicyviolation", - "onseeked", - "onseeking", - "onselect", - "onshow", - "onsort", - "onstalled", - "onstorage", - "onsubmit", - "onsuspend", - "ontimeupdate", - "ontoggle", - "onunhandledrejection", - "onunload", - "onvolumechange", - "onwaiting", -} - -// extra are ad-hoc values not covered by any of the lists above. 
-var extra = []string{ - "acronym", - "align", - "annotation", - "annotation-xml", - "applet", - "basefont", - "bgsound", - "big", - "blink", - "center", - "color", - "desc", - "face", - "font", - "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. - "foreignobject", - "frame", - "frameset", - "image", - "isindex", - "listing", - "malignmark", - "marquee", - "math", - "mglyph", - "mi", - "mn", - "mo", - "ms", - "mtext", - "nobr", - "noembed", - "noframes", - "plaintext", - "prompt", - "public", - "rb", - "rtc", - "spacer", - "strike", - "svg", - "system", - "tt", - "xmp", -} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go deleted file mode 100644 index 2a938864cb..0000000000 --- a/vendor/golang.org/x/net/html/atom/table.go +++ /dev/null @@ -1,783 +0,0 @@ -// Code generated by go generate gen.go; DO NOT EDIT. - -//go:generate go run gen.go - -package atom - -const ( - A Atom = 0x1 - Abbr Atom = 0x4 - Accept Atom = 0x1a06 - AcceptCharset Atom = 0x1a0e - Accesskey Atom = 0x2c09 - Acronym Atom = 0xaa07 - Action Atom = 0x27206 - Address Atom = 0x6f307 - Align Atom = 0xb105 - Allowfullscreen Atom = 0x2080f - Allowpaymentrequest Atom = 0xc113 - Allowusermedia Atom = 0xdd0e - Alt Atom = 0xf303 - Annotation Atom = 0x1c90a - AnnotationXml Atom = 0x1c90e - Applet Atom = 0x31906 - Area Atom = 0x35604 - Article Atom = 0x3fc07 - As Atom = 0x3c02 - Aside Atom = 0x10705 - Async Atom = 0xff05 - Audio Atom = 0x11505 - Autocomplete Atom = 0x2780c - Autofocus Atom = 0x12109 - Autoplay Atom = 0x13c08 - B Atom = 0x101 - Base Atom = 0x3b04 - Basefont Atom = 0x3b08 - Bdi Atom = 0xba03 - Bdo Atom = 0x14b03 - Bgsound Atom = 0x15e07 - Big Atom = 0x17003 - Blink Atom = 0x17305 - Blockquote Atom = 0x1870a - Body Atom = 0x2804 - Br Atom = 0x202 - Button Atom = 0x19106 - Canvas Atom = 0x10306 - Caption Atom = 0x23107 - Center Atom = 0x22006 - Challenge Atom = 0x29b09 - Charset Atom = 0x2107 - Checked Atom = 0x47907 - Cite Atom = 0x19c04 - Class Atom = 0x56405 - Code Atom = 0x5c504 - Col Atom = 0x1ab03 - Colgroup Atom = 0x1ab08 - Color Atom = 0x1bf05 - Cols Atom = 0x1c404 - Colspan Atom = 0x1c407 - Command Atom = 0x1d707 - Content Atom = 0x58b07 - Contenteditable Atom = 0x58b0f - Contextmenu Atom = 0x3800b - Controls Atom = 0x1de08 - Coords Atom = 0x1ea06 - Crossorigin Atom = 0x1fb0b - Data Atom = 0x4a504 - Datalist Atom = 0x4a508 - Datetime Atom = 0x2b808 - Dd Atom = 0x2d702 - Default Atom = 0x10a07 - Defer Atom = 0x5c705 - Del Atom = 0x45203 - Desc Atom = 0x56104 - Details Atom = 0x7207 - Dfn Atom = 0x8703 - Dialog Atom = 0xbb06 - Dir Atom = 0x9303 - Dirname Atom = 0x9307 - Disabled Atom = 0x16408 - Div Atom = 0x16b03 - Dl Atom = 0x5e602 - Download Atom = 0x46308 - Draggable Atom = 0x17a09 - Dropzone Atom = 0x40508 - Dt Atom = 0x64b02 - Em Atom = 0x6e02 - Embed Atom = 0x6e05 - Enctype Atom = 0x28d07 - Face Atom = 0x21e04 - Fieldset Atom = 0x22608 - Figcaption Atom = 0x22e0a - Figure Atom = 0x24806 - Font Atom = 0x3f04 - Footer Atom = 0xf606 - For Atom = 0x25403 - ForeignObject Atom = 0x2540d - Foreignobject Atom = 0x2610d - Form Atom = 0x26e04 - Formaction Atom = 0x26e0a - Formenctype Atom = 0x2890b - Formmethod Atom = 0x2a40a - Formnovalidate Atom = 0x2ae0e - Formtarget Atom = 0x2c00a - Frame Atom = 0x8b05 - Frameset Atom = 0x8b08 - H1 Atom = 0x15c02 - H2 Atom = 0x2de02 - H3 Atom = 0x30d02 - H4 Atom = 0x34502 - H5 Atom = 0x34f02 - H6 Atom = 0x64d02 - Head Atom = 0x33104 - Header Atom = 0x33106 - Headers Atom = 0x33107 - Height 
Atom = 0x5206 - Hgroup Atom = 0x2ca06 - Hidden Atom = 0x2d506 - High Atom = 0x2db04 - Hr Atom = 0x15702 - Href Atom = 0x2e004 - Hreflang Atom = 0x2e008 - Html Atom = 0x5604 - HttpEquiv Atom = 0x2e80a - I Atom = 0x601 - Icon Atom = 0x58a04 - Id Atom = 0x10902 - Iframe Atom = 0x2fc06 - Image Atom = 0x30205 - Img Atom = 0x30703 - Input Atom = 0x44b05 - Inputmode Atom = 0x44b09 - Ins Atom = 0x20403 - Integrity Atom = 0x23f09 - Is Atom = 0x16502 - Isindex Atom = 0x30f07 - Ismap Atom = 0x31605 - Itemid Atom = 0x38b06 - Itemprop Atom = 0x19d08 - Itemref Atom = 0x3cd07 - Itemscope Atom = 0x67109 - Itemtype Atom = 0x31f08 - Kbd Atom = 0xb903 - Keygen Atom = 0x3206 - Keytype Atom = 0xd607 - Kind Atom = 0x17704 - Label Atom = 0x5905 - Lang Atom = 0x2e404 - Legend Atom = 0x18106 - Li Atom = 0xb202 - Link Atom = 0x17404 - List Atom = 0x4a904 - Listing Atom = 0x4a907 - Loop Atom = 0x5d04 - Low Atom = 0xc303 - Main Atom = 0x1004 - Malignmark Atom = 0xb00a - Manifest Atom = 0x6d708 - Map Atom = 0x31803 - Mark Atom = 0xb604 - Marquee Atom = 0x32707 - Math Atom = 0x32e04 - Max Atom = 0x33d03 - Maxlength Atom = 0x33d09 - Media Atom = 0xe605 - Mediagroup Atom = 0xe60a - Menu Atom = 0x38704 - Menuitem Atom = 0x38708 - Meta Atom = 0x4b804 - Meter Atom = 0x9805 - Method Atom = 0x2a806 - Mglyph Atom = 0x30806 - Mi Atom = 0x34702 - Min Atom = 0x34703 - Minlength Atom = 0x34709 - Mn Atom = 0x2b102 - Mo Atom = 0xa402 - Ms Atom = 0x67402 - Mtext Atom = 0x35105 - Multiple Atom = 0x35f08 - Muted Atom = 0x36705 - Name Atom = 0x9604 - Nav Atom = 0x1303 - Nobr Atom = 0x3704 - Noembed Atom = 0x6c07 - Noframes Atom = 0x8908 - Nomodule Atom = 0xa208 - Nonce Atom = 0x1a605 - Noscript Atom = 0x21608 - Novalidate Atom = 0x2b20a - Object Atom = 0x26806 - Ol Atom = 0x13702 - Onabort Atom = 0x19507 - Onafterprint Atom = 0x2360c - Onautocomplete Atom = 0x2760e - Onautocompleteerror Atom = 0x27613 - Onauxclick Atom = 0x61f0a - Onbeforeprint Atom = 0x69e0d - Onbeforeunload Atom = 0x6e70e - Onblur Atom = 0x56d06 - Oncancel Atom = 0x11908 - Oncanplay Atom = 0x14d09 - Oncanplaythrough Atom = 0x14d10 - Onchange Atom = 0x41b08 - Onclick Atom = 0x2f507 - Onclose Atom = 0x36c07 - Oncontextmenu Atom = 0x37e0d - Oncopy Atom = 0x39106 - Oncuechange Atom = 0x3970b - Oncut Atom = 0x3a205 - Ondblclick Atom = 0x3a70a - Ondrag Atom = 0x3b106 - Ondragend Atom = 0x3b109 - Ondragenter Atom = 0x3ba0b - Ondragexit Atom = 0x3c50a - Ondragleave Atom = 0x3df0b - Ondragover Atom = 0x3ea0a - Ondragstart Atom = 0x3f40b - Ondrop Atom = 0x40306 - Ondurationchange Atom = 0x41310 - Onemptied Atom = 0x40a09 - Onended Atom = 0x42307 - Onerror Atom = 0x42a07 - Onfocus Atom = 0x43107 - Onhashchange Atom = 0x43d0c - Oninput Atom = 0x44907 - Oninvalid Atom = 0x45509 - Onkeydown Atom = 0x45e09 - Onkeypress Atom = 0x46b0a - Onkeyup Atom = 0x48007 - Onlanguagechange Atom = 0x48d10 - Onload Atom = 0x49d06 - Onloadeddata Atom = 0x49d0c - Onloadedmetadata Atom = 0x4b010 - Onloadend Atom = 0x4c609 - Onloadstart Atom = 0x4cf0b - Onmessage Atom = 0x4da09 - Onmessageerror Atom = 0x4da0e - Onmousedown Atom = 0x4e80b - Onmouseenter Atom = 0x4f30c - Onmouseleave Atom = 0x4ff0c - Onmousemove Atom = 0x50b0b - Onmouseout Atom = 0x5160a - Onmouseover Atom = 0x5230b - Onmouseup Atom = 0x52e09 - Onmousewheel Atom = 0x53c0c - Onoffline Atom = 0x54809 - Ononline Atom = 0x55108 - Onpagehide Atom = 0x5590a - Onpageshow Atom = 0x5730a - Onpaste Atom = 0x57f07 - Onpause Atom = 0x59a07 - Onplay Atom = 0x5a406 - Onplaying Atom = 0x5a409 - Onpopstate Atom = 0x5ad0a - Onprogress Atom = 0x5b70a - 
Onratechange Atom = 0x5cc0c - Onrejectionhandled Atom = 0x5d812 - Onreset Atom = 0x5ea07 - Onresize Atom = 0x5f108 - Onscroll Atom = 0x60008 - Onsecuritypolicyviolation Atom = 0x60819 - Onseeked Atom = 0x62908 - Onseeking Atom = 0x63109 - Onselect Atom = 0x63a08 - Onshow Atom = 0x64406 - Onsort Atom = 0x64f06 - Onstalled Atom = 0x65909 - Onstorage Atom = 0x66209 - Onsubmit Atom = 0x66b08 - Onsuspend Atom = 0x67b09 - Ontimeupdate Atom = 0x400c - Ontoggle Atom = 0x68408 - Onunhandledrejection Atom = 0x68c14 - Onunload Atom = 0x6ab08 - Onvolumechange Atom = 0x6b30e - Onwaiting Atom = 0x6c109 - Onwheel Atom = 0x6ca07 - Open Atom = 0x1a304 - Optgroup Atom = 0x5f08 - Optimum Atom = 0x6d107 - Option Atom = 0x6e306 - Output Atom = 0x51d06 - P Atom = 0xc01 - Param Atom = 0xc05 - Pattern Atom = 0x6607 - Picture Atom = 0x7b07 - Ping Atom = 0xef04 - Placeholder Atom = 0x1310b - Plaintext Atom = 0x1b209 - Playsinline Atom = 0x1400b - Poster Atom = 0x2cf06 - Pre Atom = 0x47003 - Preload Atom = 0x48607 - Progress Atom = 0x5b908 - Prompt Atom = 0x53606 - Public Atom = 0x58606 - Q Atom = 0xcf01 - Radiogroup Atom = 0x30a - Rb Atom = 0x3a02 - Readonly Atom = 0x35708 - Referrerpolicy Atom = 0x3d10e - Rel Atom = 0x48703 - Required Atom = 0x24c08 - Reversed Atom = 0x8008 - Rows Atom = 0x9c04 - Rowspan Atom = 0x9c07 - Rp Atom = 0x23c02 - Rt Atom = 0x19a02 - Rtc Atom = 0x19a03 - Ruby Atom = 0xfb04 - S Atom = 0x2501 - Samp Atom = 0x7804 - Sandbox Atom = 0x12907 - Scope Atom = 0x67505 - Scoped Atom = 0x67506 - Script Atom = 0x21806 - Seamless Atom = 0x37108 - Section Atom = 0x56807 - Select Atom = 0x63c06 - Selected Atom = 0x63c08 - Shape Atom = 0x1e505 - Size Atom = 0x5f504 - Sizes Atom = 0x5f505 - Slot Atom = 0x1ef04 - Small Atom = 0x20605 - Sortable Atom = 0x65108 - Sorted Atom = 0x33706 - Source Atom = 0x37806 - Spacer Atom = 0x43706 - Span Atom = 0x9f04 - Spellcheck Atom = 0x4740a - Src Atom = 0x5c003 - Srcdoc Atom = 0x5c006 - Srclang Atom = 0x5f907 - Srcset Atom = 0x6f906 - Start Atom = 0x3fa05 - Step Atom = 0x58304 - Strike Atom = 0xd206 - Strong Atom = 0x6dd06 - Style Atom = 0x6ff05 - Sub Atom = 0x66d03 - Summary Atom = 0x70407 - Sup Atom = 0x70b03 - Svg Atom = 0x70e03 - System Atom = 0x71106 - Tabindex Atom = 0x4be08 - Table Atom = 0x59505 - Target Atom = 0x2c406 - Tbody Atom = 0x2705 - Td Atom = 0x9202 - Template Atom = 0x71408 - Textarea Atom = 0x35208 - Tfoot Atom = 0xf505 - Th Atom = 0x15602 - Thead Atom = 0x33005 - Time Atom = 0x4204 - Title Atom = 0x11005 - Tr Atom = 0xcc02 - Track Atom = 0x1ba05 - Translate Atom = 0x1f209 - Tt Atom = 0x6802 - Type Atom = 0xd904 - Typemustmatch Atom = 0x2900d - U Atom = 0xb01 - Ul Atom = 0xa702 - Updateviacache Atom = 0x460e - Usemap Atom = 0x59e06 - Value Atom = 0x1505 - Var Atom = 0x16d03 - Video Atom = 0x2f105 - Wbr Atom = 0x57c03 - Width Atom = 0x64905 - Workertype Atom = 0x71c0a - Wrap Atom = 0x72604 - Xmp Atom = 0x12f03 -) - -const hash0 = 0x81cdf10e - -const maxAtomLen = 25 - -var table = [1 << 9]Atom{ - 0x1: 0xe60a, // mediagroup - 0x2: 0x2e404, // lang - 0x4: 0x2c09, // accesskey - 0x5: 0x8b08, // frameset - 0x7: 0x63a08, // onselect - 0x8: 0x71106, // system - 0xa: 0x64905, // width - 0xc: 0x2890b, // formenctype - 0xd: 0x13702, // ol - 0xe: 0x3970b, // oncuechange - 0x10: 0x14b03, // bdo - 0x11: 0x11505, // audio - 0x12: 0x17a09, // draggable - 0x14: 0x2f105, // video - 0x15: 0x2b102, // mn - 0x16: 0x38704, // menu - 0x17: 0x2cf06, // poster - 0x19: 0xf606, // footer - 0x1a: 0x2a806, // method - 0x1b: 0x2b808, // datetime - 0x1c: 0x19507, // onabort - 
0x1d: 0x460e, // updateviacache - 0x1e: 0xff05, // async - 0x1f: 0x49d06, // onload - 0x21: 0x11908, // oncancel - 0x22: 0x62908, // onseeked - 0x23: 0x30205, // image - 0x24: 0x5d812, // onrejectionhandled - 0x26: 0x17404, // link - 0x27: 0x51d06, // output - 0x28: 0x33104, // head - 0x29: 0x4ff0c, // onmouseleave - 0x2a: 0x57f07, // onpaste - 0x2b: 0x5a409, // onplaying - 0x2c: 0x1c407, // colspan - 0x2f: 0x1bf05, // color - 0x30: 0x5f504, // size - 0x31: 0x2e80a, // http-equiv - 0x33: 0x601, // i - 0x34: 0x5590a, // onpagehide - 0x35: 0x68c14, // onunhandledrejection - 0x37: 0x42a07, // onerror - 0x3a: 0x3b08, // basefont - 0x3f: 0x1303, // nav - 0x40: 0x17704, // kind - 0x41: 0x35708, // readonly - 0x42: 0x30806, // mglyph - 0x44: 0xb202, // li - 0x46: 0x2d506, // hidden - 0x47: 0x70e03, // svg - 0x48: 0x58304, // step - 0x49: 0x23f09, // integrity - 0x4a: 0x58606, // public - 0x4c: 0x1ab03, // col - 0x4d: 0x1870a, // blockquote - 0x4e: 0x34f02, // h5 - 0x50: 0x5b908, // progress - 0x51: 0x5f505, // sizes - 0x52: 0x34502, // h4 - 0x56: 0x33005, // thead - 0x57: 0xd607, // keytype - 0x58: 0x5b70a, // onprogress - 0x59: 0x44b09, // inputmode - 0x5a: 0x3b109, // ondragend - 0x5d: 0x3a205, // oncut - 0x5e: 0x43706, // spacer - 0x5f: 0x1ab08, // colgroup - 0x62: 0x16502, // is - 0x65: 0x3c02, // as - 0x66: 0x54809, // onoffline - 0x67: 0x33706, // sorted - 0x69: 0x48d10, // onlanguagechange - 0x6c: 0x43d0c, // onhashchange - 0x6d: 0x9604, // name - 0x6e: 0xf505, // tfoot - 0x6f: 0x56104, // desc - 0x70: 0x33d03, // max - 0x72: 0x1ea06, // coords - 0x73: 0x30d02, // h3 - 0x74: 0x6e70e, // onbeforeunload - 0x75: 0x9c04, // rows - 0x76: 0x63c06, // select - 0x77: 0x9805, // meter - 0x78: 0x38b06, // itemid - 0x79: 0x53c0c, // onmousewheel - 0x7a: 0x5c006, // srcdoc - 0x7d: 0x1ba05, // track - 0x7f: 0x31f08, // itemtype - 0x82: 0xa402, // mo - 0x83: 0x41b08, // onchange - 0x84: 0x33107, // headers - 0x85: 0x5cc0c, // onratechange - 0x86: 0x60819, // onsecuritypolicyviolation - 0x88: 0x4a508, // datalist - 0x89: 0x4e80b, // onmousedown - 0x8a: 0x1ef04, // slot - 0x8b: 0x4b010, // onloadedmetadata - 0x8c: 0x1a06, // accept - 0x8d: 0x26806, // object - 0x91: 0x6b30e, // onvolumechange - 0x92: 0x2107, // charset - 0x93: 0x27613, // onautocompleteerror - 0x94: 0xc113, // allowpaymentrequest - 0x95: 0x2804, // body - 0x96: 0x10a07, // default - 0x97: 0x63c08, // selected - 0x98: 0x21e04, // face - 0x99: 0x1e505, // shape - 0x9b: 0x68408, // ontoggle - 0x9e: 0x64b02, // dt - 0x9f: 0xb604, // mark - 0xa1: 0xb01, // u - 0xa4: 0x6ab08, // onunload - 0xa5: 0x5d04, // loop - 0xa6: 0x16408, // disabled - 0xaa: 0x42307, // onended - 0xab: 0xb00a, // malignmark - 0xad: 0x67b09, // onsuspend - 0xae: 0x35105, // mtext - 0xaf: 0x64f06, // onsort - 0xb0: 0x19d08, // itemprop - 0xb3: 0x67109, // itemscope - 0xb4: 0x17305, // blink - 0xb6: 0x3b106, // ondrag - 0xb7: 0xa702, // ul - 0xb8: 0x26e04, // form - 0xb9: 0x12907, // sandbox - 0xba: 0x8b05, // frame - 0xbb: 0x1505, // value - 0xbc: 0x66209, // onstorage - 0xbf: 0xaa07, // acronym - 0xc0: 0x19a02, // rt - 0xc2: 0x202, // br - 0xc3: 0x22608, // fieldset - 0xc4: 0x2900d, // typemustmatch - 0xc5: 0xa208, // nomodule - 0xc6: 0x6c07, // noembed - 0xc7: 0x69e0d, // onbeforeprint - 0xc8: 0x19106, // button - 0xc9: 0x2f507, // onclick - 0xca: 0x70407, // summary - 0xcd: 0xfb04, // ruby - 0xce: 0x56405, // class - 0xcf: 0x3f40b, // ondragstart - 0xd0: 0x23107, // caption - 0xd4: 0xdd0e, // allowusermedia - 0xd5: 0x4cf0b, // onloadstart - 0xd9: 0x16b03, // div - 0xda: 
0x4a904, // list - 0xdb: 0x32e04, // math - 0xdc: 0x44b05, // input - 0xdf: 0x3ea0a, // ondragover - 0xe0: 0x2de02, // h2 - 0xe2: 0x1b209, // plaintext - 0xe4: 0x4f30c, // onmouseenter - 0xe7: 0x47907, // checked - 0xe8: 0x47003, // pre - 0xea: 0x35f08, // multiple - 0xeb: 0xba03, // bdi - 0xec: 0x33d09, // maxlength - 0xed: 0xcf01, // q - 0xee: 0x61f0a, // onauxclick - 0xf0: 0x57c03, // wbr - 0xf2: 0x3b04, // base - 0xf3: 0x6e306, // option - 0xf5: 0x41310, // ondurationchange - 0xf7: 0x8908, // noframes - 0xf9: 0x40508, // dropzone - 0xfb: 0x67505, // scope - 0xfc: 0x8008, // reversed - 0xfd: 0x3ba0b, // ondragenter - 0xfe: 0x3fa05, // start - 0xff: 0x12f03, // xmp - 0x100: 0x5f907, // srclang - 0x101: 0x30703, // img - 0x104: 0x101, // b - 0x105: 0x25403, // for - 0x106: 0x10705, // aside - 0x107: 0x44907, // oninput - 0x108: 0x35604, // area - 0x109: 0x2a40a, // formmethod - 0x10a: 0x72604, // wrap - 0x10c: 0x23c02, // rp - 0x10d: 0x46b0a, // onkeypress - 0x10e: 0x6802, // tt - 0x110: 0x34702, // mi - 0x111: 0x36705, // muted - 0x112: 0xf303, // alt - 0x113: 0x5c504, // code - 0x114: 0x6e02, // em - 0x115: 0x3c50a, // ondragexit - 0x117: 0x9f04, // span - 0x119: 0x6d708, // manifest - 0x11a: 0x38708, // menuitem - 0x11b: 0x58b07, // content - 0x11d: 0x6c109, // onwaiting - 0x11f: 0x4c609, // onloadend - 0x121: 0x37e0d, // oncontextmenu - 0x123: 0x56d06, // onblur - 0x124: 0x3fc07, // article - 0x125: 0x9303, // dir - 0x126: 0xef04, // ping - 0x127: 0x24c08, // required - 0x128: 0x45509, // oninvalid - 0x129: 0xb105, // align - 0x12b: 0x58a04, // icon - 0x12c: 0x64d02, // h6 - 0x12d: 0x1c404, // cols - 0x12e: 0x22e0a, // figcaption - 0x12f: 0x45e09, // onkeydown - 0x130: 0x66b08, // onsubmit - 0x131: 0x14d09, // oncanplay - 0x132: 0x70b03, // sup - 0x133: 0xc01, // p - 0x135: 0x40a09, // onemptied - 0x136: 0x39106, // oncopy - 0x137: 0x19c04, // cite - 0x138: 0x3a70a, // ondblclick - 0x13a: 0x50b0b, // onmousemove - 0x13c: 0x66d03, // sub - 0x13d: 0x48703, // rel - 0x13e: 0x5f08, // optgroup - 0x142: 0x9c07, // rowspan - 0x143: 0x37806, // source - 0x144: 0x21608, // noscript - 0x145: 0x1a304, // open - 0x146: 0x20403, // ins - 0x147: 0x2540d, // foreignObject - 0x148: 0x5ad0a, // onpopstate - 0x14a: 0x28d07, // enctype - 0x14b: 0x2760e, // onautocomplete - 0x14c: 0x35208, // textarea - 0x14e: 0x2780c, // autocomplete - 0x14f: 0x15702, // hr - 0x150: 0x1de08, // controls - 0x151: 0x10902, // id - 0x153: 0x2360c, // onafterprint - 0x155: 0x2610d, // foreignobject - 0x156: 0x32707, // marquee - 0x157: 0x59a07, // onpause - 0x158: 0x5e602, // dl - 0x159: 0x5206, // height - 0x15a: 0x34703, // min - 0x15b: 0x9307, // dirname - 0x15c: 0x1f209, // translate - 0x15d: 0x5604, // html - 0x15e: 0x34709, // minlength - 0x15f: 0x48607, // preload - 0x160: 0x71408, // template - 0x161: 0x3df0b, // ondragleave - 0x162: 0x3a02, // rb - 0x164: 0x5c003, // src - 0x165: 0x6dd06, // strong - 0x167: 0x7804, // samp - 0x168: 0x6f307, // address - 0x169: 0x55108, // ononline - 0x16b: 0x1310b, // placeholder - 0x16c: 0x2c406, // target - 0x16d: 0x20605, // small - 0x16e: 0x6ca07, // onwheel - 0x16f: 0x1c90a, // annotation - 0x170: 0x4740a, // spellcheck - 0x171: 0x7207, // details - 0x172: 0x10306, // canvas - 0x173: 0x12109, // autofocus - 0x174: 0xc05, // param - 0x176: 0x46308, // download - 0x177: 0x45203, // del - 0x178: 0x36c07, // onclose - 0x179: 0xb903, // kbd - 0x17a: 0x31906, // applet - 0x17b: 0x2e004, // href - 0x17c: 0x5f108, // onresize - 0x17e: 0x49d0c, // onloadeddata - 0x180: 0xcc02, // tr - 
0x181: 0x2c00a, // formtarget - 0x182: 0x11005, // title - 0x183: 0x6ff05, // style - 0x184: 0xd206, // strike - 0x185: 0x59e06, // usemap - 0x186: 0x2fc06, // iframe - 0x187: 0x1004, // main - 0x189: 0x7b07, // picture - 0x18c: 0x31605, // ismap - 0x18e: 0x4a504, // data - 0x18f: 0x5905, // label - 0x191: 0x3d10e, // referrerpolicy - 0x192: 0x15602, // th - 0x194: 0x53606, // prompt - 0x195: 0x56807, // section - 0x197: 0x6d107, // optimum - 0x198: 0x2db04, // high - 0x199: 0x15c02, // h1 - 0x19a: 0x65909, // onstalled - 0x19b: 0x16d03, // var - 0x19c: 0x4204, // time - 0x19e: 0x67402, // ms - 0x19f: 0x33106, // header - 0x1a0: 0x4da09, // onmessage - 0x1a1: 0x1a605, // nonce - 0x1a2: 0x26e0a, // formaction - 0x1a3: 0x22006, // center - 0x1a4: 0x3704, // nobr - 0x1a5: 0x59505, // table - 0x1a6: 0x4a907, // listing - 0x1a7: 0x18106, // legend - 0x1a9: 0x29b09, // challenge - 0x1aa: 0x24806, // figure - 0x1ab: 0xe605, // media - 0x1ae: 0xd904, // type - 0x1af: 0x3f04, // font - 0x1b0: 0x4da0e, // onmessageerror - 0x1b1: 0x37108, // seamless - 0x1b2: 0x8703, // dfn - 0x1b3: 0x5c705, // defer - 0x1b4: 0xc303, // low - 0x1b5: 0x19a03, // rtc - 0x1b6: 0x5230b, // onmouseover - 0x1b7: 0x2b20a, // novalidate - 0x1b8: 0x71c0a, // workertype - 0x1ba: 0x3cd07, // itemref - 0x1bd: 0x1, // a - 0x1be: 0x31803, // map - 0x1bf: 0x400c, // ontimeupdate - 0x1c0: 0x15e07, // bgsound - 0x1c1: 0x3206, // keygen - 0x1c2: 0x2705, // tbody - 0x1c5: 0x64406, // onshow - 0x1c7: 0x2501, // s - 0x1c8: 0x6607, // pattern - 0x1cc: 0x14d10, // oncanplaythrough - 0x1ce: 0x2d702, // dd - 0x1cf: 0x6f906, // srcset - 0x1d0: 0x17003, // big - 0x1d2: 0x65108, // sortable - 0x1d3: 0x48007, // onkeyup - 0x1d5: 0x5a406, // onplay - 0x1d7: 0x4b804, // meta - 0x1d8: 0x40306, // ondrop - 0x1da: 0x60008, // onscroll - 0x1db: 0x1fb0b, // crossorigin - 0x1dc: 0x5730a, // onpageshow - 0x1dd: 0x4, // abbr - 0x1de: 0x9202, // td - 0x1df: 0x58b0f, // contenteditable - 0x1e0: 0x27206, // action - 0x1e1: 0x1400b, // playsinline - 0x1e2: 0x43107, // onfocus - 0x1e3: 0x2e008, // hreflang - 0x1e5: 0x5160a, // onmouseout - 0x1e6: 0x5ea07, // onreset - 0x1e7: 0x13c08, // autoplay - 0x1e8: 0x63109, // onseeking - 0x1ea: 0x67506, // scoped - 0x1ec: 0x30a, // radiogroup - 0x1ee: 0x3800b, // contextmenu - 0x1ef: 0x52e09, // onmouseup - 0x1f1: 0x2ca06, // hgroup - 0x1f2: 0x2080f, // allowfullscreen - 0x1f3: 0x4be08, // tabindex - 0x1f6: 0x30f07, // isindex - 0x1f7: 0x1a0e, // accept-charset - 0x1f8: 0x2ae0e, // formnovalidate - 0x1fb: 0x1c90e, // annotation-xml - 0x1fc: 0x6e05, // embed - 0x1fd: 0x21806, // script - 0x1fe: 0xbb06, // dialog - 0x1ff: 0x1d707, // command -} - -const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + - "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + - "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + - "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + - "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + - "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + - "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + - "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + - "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + - "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + - "ignObjectforeignobjectformactionautocompleteerrorformenctype" + - "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + - "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + - 
"ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + - "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + - "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + - "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + - "articleondropzonemptiedondurationchangeonendedonerroronfocus" + - "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + - "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + - "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + - "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + - "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + - "classectionbluronpageshowbronpastepublicontenteditableonpaus" + - "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + - "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + - "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + - "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + - "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + - "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + - "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go deleted file mode 100644 index a3a918f0b3..0000000000 --- a/vendor/golang.org/x/net/html/const.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// Section 12.2.4.2 of the HTML5 specification says "The following elements -// have varying levels of special parsing rules". -// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements -var isSpecialElementMap = map[string]bool{ - "address": true, - "applet": true, - "area": true, - "article": true, - "aside": true, - "base": true, - "basefont": true, - "bgsound": true, - "blockquote": true, - "body": true, - "br": true, - "button": true, - "caption": true, - "center": true, - "col": true, - "colgroup": true, - "dd": true, - "details": true, - "dir": true, - "div": true, - "dl": true, - "dt": true, - "embed": true, - "fieldset": true, - "figcaption": true, - "figure": true, - "footer": true, - "form": true, - "frame": true, - "frameset": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "header": true, - "hgroup": true, - "hr": true, - "html": true, - "iframe": true, - "img": true, - "input": true, - "isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility. 
- "keygen": true, - "li": true, - "link": true, - "listing": true, - "main": true, - "marquee": true, - "menu": true, - "meta": true, - "nav": true, - "noembed": true, - "noframes": true, - "noscript": true, - "object": true, - "ol": true, - "p": true, - "param": true, - "plaintext": true, - "pre": true, - "script": true, - "section": true, - "select": true, - "source": true, - "style": true, - "summary": true, - "table": true, - "tbody": true, - "td": true, - "template": true, - "textarea": true, - "tfoot": true, - "th": true, - "thead": true, - "title": true, - "tr": true, - "track": true, - "ul": true, - "wbr": true, - "xmp": true, -} - -func isSpecialElement(element *Node) bool { - switch element.Namespace { - case "", "html": - return isSpecialElementMap[element.Data] - case "math": - switch element.Data { - case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": - return true - } - case "svg": - switch element.Data { - case "foreignObject", "desc", "title": - return true - } - } - return false -} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go deleted file mode 100644 index 822ed42a04..0000000000 --- a/vendor/golang.org/x/net/html/doc.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package html implements an HTML5-compliant tokenizer and parser. - -Tokenization is done by creating a Tokenizer for an io.Reader r. It is the -caller's responsibility to ensure that r provides UTF-8 encoded HTML. - - z := html.NewTokenizer(r) - -Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), -which parses the next token and returns its type, or an error: - - for { - tt := z.Next() - if tt == html.ErrorToken { - // ... - return ... - } - // Process the current token. - } - -There are two APIs for retrieving the current token. The high-level API is to -call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs -allow optionally calling Raw after Next but before Token, Text, TagName, or -TagAttr. In EBNF notation, the valid call sequence per token is: - - Next {Raw} [ Token | Text | TagName {TagAttr} ] - -Token returns an independent data structure that completely describes a token. -Entities (such as "<") are unescaped, tag names and attribute keys are -lower-cased, and attributes are collected into a []Attribute. For example: - - for { - if z.Next() == html.ErrorToken { - // Returning io.EOF indicates success. - return z.Err() - } - emitToken(z.Token()) - } - -The low-level API performs fewer allocations and copies, but the contents of -the []byte values returned by Text, TagName and TagAttr may change on the next -call to Next. For example, to extract an HTML page's anchor text: - - depth := 0 - for { - tt := z.Next() - switch tt { - case html.ErrorToken: - return z.Err() - case html.TextToken: - if depth > 0 { - // emitBytes should copy the []byte it receives, - // if it doesn't process it immediately. - emitBytes(z.Text()) - } - case html.StartTagToken, html.EndTagToken: - tn, _ := z.TagName() - if len(tn) == 1 && tn[0] == 'a' { - if tt == html.StartTagToken { - depth++ - } else { - depth-- - } - } - } - } - -Parsing is done by calling Parse with an io.Reader, which returns the root of -the parse tree (the document element) as a *Node. It is the caller's -responsibility to ensure that the Reader provides UTF-8 encoded HTML. 
For -example, to process each anchor node in depth-first order: - - doc, err := html.Parse(r) - if err != nil { - // ... - } - var f func(*html.Node) - f = func(n *html.Node) { - if n.Type == html.ElementNode && n.Data == "a" { - // Do something with n... - } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } - } - f(doc) - -The relevant specifications include: -https://html.spec.whatwg.org/multipage/syntax.html and -https://html.spec.whatwg.org/multipage/syntax.html#tokenization -*/ -package html // import "golang.org/x/net/html" - -// The tokenization algorithm implemented by this package is not a line-by-line -// transliteration of the relatively verbose state-machine in the WHATWG -// specification. A more direct approach is used instead, where the program -// counter implies the state, such as whether it is tokenizing a tag or a text -// node. Specification compliance is verified by checking expected and actual -// outputs over a test suite rather than aiming for algorithmic fidelity. - -// TODO(nigeltao): Does a DOM API belong in this package or a separate one? -// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go deleted file mode 100644 index c484e5a94f..0000000000 --- a/vendor/golang.org/x/net/html/doctype.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "strings" -) - -// parseDoctype parses the data from a DoctypeToken into a name, -// public identifier, and system identifier. It returns a Node whose Type -// is DoctypeNode, whose Data is the name, and which has attributes -// named "system" and "public" for the two identifiers if they were present. -// quirks is whether the document should be parsed in "quirks mode". -func parseDoctype(s string) (n *Node, quirks bool) { - n = &Node{Type: DoctypeNode} - - // Find the name. - space := strings.IndexAny(s, whitespace) - if space == -1 { - space = len(s) - } - n.Data = s[:space] - // The comparison to "html" is case-sensitive. - if n.Data != "html" { - quirks = true - } - n.Data = strings.ToLower(n.Data) - s = strings.TrimLeft(s[space:], whitespace) - - if len(s) < 6 { - // It can't start with "PUBLIC" or "SYSTEM". - // Ignore the rest of the string. - return n, quirks || s != "" - } - - key := strings.ToLower(s[:6]) - s = s[6:] - for key == "public" || key == "system" { - s = strings.TrimLeft(s, whitespace) - if s == "" { - break - } - quote := s[0] - if quote != '"' && quote != '\'' { - break - } - s = s[1:] - q := strings.IndexRune(s, rune(quote)) - var id string - if q == -1 { - id = s - s = "" - } else { - id = s[:q] - s = s[q+1:] - } - n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) - if key == "public" { - key = "system" - } else { - key = "" - } - } - - if key != "" || s != "" { - quirks = true - } else if len(n.Attr) > 0 { - if n.Attr[0].Key == "public" { - public := strings.ToLower(n.Attr[0].Val) - switch public { - case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": - quirks = true - default: - for _, q := range quirkyIDs { - if strings.HasPrefix(public, q) { - quirks = true - break - } - } - } - // The following two public IDs only cause quirks mode if there is no system ID. 
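
The quirks detection above is mostly prefix matching on the lower-cased public identifier; the check that follows handles the two conditional HTML 4.01 IDs. A reduced sketch using two entries from the quirkyIDs list below (quirky is a hypothetical helper, not in the package):

	package main

	import (
		"fmt"
		"strings"
	)

	// quirky mirrors parseDoctype's prefix check, reduced to two of
	// the identifiers from the quirkyIDs list.
	func quirky(public string) bool {
		public = strings.ToLower(public)
		for _, q := range []string{
			"-//ietf//dtd html//",
			"-//w3c//dtd html 4.0 transitional//",
		} {
			if strings.HasPrefix(public, q) {
				return true
			}
		}
		return false
	}

	func main() {
		fmt.Println(quirky("-//W3C//DTD HTML 4.0 Transitional//EN")) // true
		fmt.Println(quirky("-//W3C//DTD HTML 4.01//EN"))             // false
	}
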
- if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || - strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { - quirks = true - } - } - if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { - quirks = true - } - } - - return n, quirks -} - -// quirkyIDs is a list of public doctype identifiers that cause a document -// to be interpreted in quirks mode. The identifiers should be in lower case. -var quirkyIDs = []string{ - "+//silmaril//dtd html pro v0r11 19970101//", - "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", - "-//as//dtd html 3.0 aswedit + extensions//", - "-//ietf//dtd html 2.0 level 1//", - "-//ietf//dtd html 2.0 level 2//", - "-//ietf//dtd html 2.0 strict level 1//", - "-//ietf//dtd html 2.0 strict level 2//", - "-//ietf//dtd html 2.0 strict//", - "-//ietf//dtd html 2.0//", - "-//ietf//dtd html 2.1e//", - "-//ietf//dtd html 3.0//", - "-//ietf//dtd html 3.2 final//", - "-//ietf//dtd html 3.2//", - "-//ietf//dtd html 3//", - "-//ietf//dtd html level 0//", - "-//ietf//dtd html level 1//", - "-//ietf//dtd html level 2//", - "-//ietf//dtd html level 3//", - "-//ietf//dtd html strict level 0//", - "-//ietf//dtd html strict level 1//", - "-//ietf//dtd html strict level 2//", - "-//ietf//dtd html strict level 3//", - "-//ietf//dtd html strict//", - "-//ietf//dtd html//", - "-//metrius//dtd metrius presentational//", - "-//microsoft//dtd internet explorer 2.0 html strict//", - "-//microsoft//dtd internet explorer 2.0 html//", - "-//microsoft//dtd internet explorer 2.0 tables//", - "-//microsoft//dtd internet explorer 3.0 html strict//", - "-//microsoft//dtd internet explorer 3.0 html//", - "-//microsoft//dtd internet explorer 3.0 tables//", - "-//netscape comm. corp.//dtd html//", - "-//netscape comm. corp.//dtd strict html//", - "-//o'reilly and associates//dtd html 2.0//", - "-//o'reilly and associates//dtd html extended 1.0//", - "-//o'reilly and associates//dtd html extended relaxed 1.0//", - "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", - "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", - "-//spyglass//dtd html 2.0 extended//", - "-//sq//dtd html 2.0 hotmetal + extensions//", - "-//sun microsystems corp.//dtd hotjava html//", - "-//sun microsystems corp.//dtd hotjava strict html//", - "-//w3c//dtd html 3 1995-03-24//", - "-//w3c//dtd html 3.2 draft//", - "-//w3c//dtd html 3.2 final//", - "-//w3c//dtd html 3.2//", - "-//w3c//dtd html 3.2s draft//", - "-//w3c//dtd html 4.0 frameset//", - "-//w3c//dtd html 4.0 transitional//", - "-//w3c//dtd html experimental 19960712//", - "-//w3c//dtd html experimental 970421//", - "-//w3c//dtd w3 html//", - "-//w3o//dtd w3 html 3.0//", - "-//webtechs//dtd mozilla html 2.0//", - "-//webtechs//dtd mozilla html//", -} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go deleted file mode 100644 index b628880a01..0000000000 --- a/vendor/golang.org/x/net/html/entity.go +++ /dev/null @@ -1,2253 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// All entities that do not end with ';' are 6 or fewer bytes long. -const longestEntityWithoutSemicolon = 6 - -// entity is a map from HTML entity names to their values. 
The semicolon matters: -// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references -// lists both "amp" and "amp;" as two separate entries. -// -// Note that the HTML5 list is larger than the HTML4 list at -// http://www.w3.org/TR/html4/sgml/entities.html -var entity = map[string]rune{ - "AElig;": '\U000000C6', - "AMP;": '\U00000026', - "Aacute;": '\U000000C1', - "Abreve;": '\U00000102', - "Acirc;": '\U000000C2', - "Acy;": '\U00000410', - "Afr;": '\U0001D504', - "Agrave;": '\U000000C0', - "Alpha;": '\U00000391', - "Amacr;": '\U00000100', - "And;": '\U00002A53', - "Aogon;": '\U00000104', - "Aopf;": '\U0001D538', - "ApplyFunction;": '\U00002061', - "Aring;": '\U000000C5', - "Ascr;": '\U0001D49C', - "Assign;": '\U00002254', - "Atilde;": '\U000000C3', - "Auml;": '\U000000C4', - "Backslash;": '\U00002216', - "Barv;": '\U00002AE7', - "Barwed;": '\U00002306', - "Bcy;": '\U00000411', - "Because;": '\U00002235', - "Bernoullis;": '\U0000212C', - "Beta;": '\U00000392', - "Bfr;": '\U0001D505', - "Bopf;": '\U0001D539', - "Breve;": '\U000002D8', - "Bscr;": '\U0000212C', - "Bumpeq;": '\U0000224E', - "CHcy;": '\U00000427', - "COPY;": '\U000000A9', - "Cacute;": '\U00000106', - "Cap;": '\U000022D2', - "CapitalDifferentialD;": '\U00002145', - "Cayleys;": '\U0000212D', - "Ccaron;": '\U0000010C', - "Ccedil;": '\U000000C7', - "Ccirc;": '\U00000108', - "Cconint;": '\U00002230', - "Cdot;": '\U0000010A', - "Cedilla;": '\U000000B8', - "CenterDot;": '\U000000B7', - "Cfr;": '\U0000212D', - "Chi;": '\U000003A7', - "CircleDot;": '\U00002299', - "CircleMinus;": '\U00002296', - "CirclePlus;": '\U00002295', - "CircleTimes;": '\U00002297', - "ClockwiseContourIntegral;": '\U00002232', - "CloseCurlyDoubleQuote;": '\U0000201D', - "CloseCurlyQuote;": '\U00002019', - "Colon;": '\U00002237', - "Colone;": '\U00002A74', - "Congruent;": '\U00002261', - "Conint;": '\U0000222F', - "ContourIntegral;": '\U0000222E', - "Copf;": '\U00002102', - "Coproduct;": '\U00002210', - "CounterClockwiseContourIntegral;": '\U00002233', - "Cross;": '\U00002A2F', - "Cscr;": '\U0001D49E', - "Cup;": '\U000022D3', - "CupCap;": '\U0000224D', - "DD;": '\U00002145', - "DDotrahd;": '\U00002911', - "DJcy;": '\U00000402', - "DScy;": '\U00000405', - "DZcy;": '\U0000040F', - "Dagger;": '\U00002021', - "Darr;": '\U000021A1', - "Dashv;": '\U00002AE4', - "Dcaron;": '\U0000010E', - "Dcy;": '\U00000414', - "Del;": '\U00002207', - "Delta;": '\U00000394', - "Dfr;": '\U0001D507', - "DiacriticalAcute;": '\U000000B4', - "DiacriticalDot;": '\U000002D9', - "DiacriticalDoubleAcute;": '\U000002DD', - "DiacriticalGrave;": '\U00000060', - "DiacriticalTilde;": '\U000002DC', - "Diamond;": '\U000022C4', - "DifferentialD;": '\U00002146', - "Dopf;": '\U0001D53B', - "Dot;": '\U000000A8', - "DotDot;": '\U000020DC', - "DotEqual;": '\U00002250', - "DoubleContourIntegral;": '\U0000222F', - "DoubleDot;": '\U000000A8', - "DoubleDownArrow;": '\U000021D3', - "DoubleLeftArrow;": '\U000021D0', - "DoubleLeftRightArrow;": '\U000021D4', - "DoubleLeftTee;": '\U00002AE4', - "DoubleLongLeftArrow;": '\U000027F8', - "DoubleLongLeftRightArrow;": '\U000027FA', - "DoubleLongRightArrow;": '\U000027F9', - "DoubleRightArrow;": '\U000021D2', - "DoubleRightTee;": '\U000022A8', - "DoubleUpArrow;": '\U000021D1', - "DoubleUpDownArrow;": '\U000021D5', - "DoubleVerticalBar;": '\U00002225', - "DownArrow;": '\U00002193', - "DownArrowBar;": '\U00002913', - "DownArrowUpArrow;": '\U000021F5', - "DownBreve;": '\U00000311', - "DownLeftRightVector;": '\U00002950', - "DownLeftTeeVector;": '\U0000295E', 
- "DownLeftVector;": '\U000021BD', - "DownLeftVectorBar;": '\U00002956', - "DownRightTeeVector;": '\U0000295F', - "DownRightVector;": '\U000021C1', - "DownRightVectorBar;": '\U00002957', - "DownTee;": '\U000022A4', - "DownTeeArrow;": '\U000021A7', - "Downarrow;": '\U000021D3', - "Dscr;": '\U0001D49F', - "Dstrok;": '\U00000110', - "ENG;": '\U0000014A', - "ETH;": '\U000000D0', - "Eacute;": '\U000000C9', - "Ecaron;": '\U0000011A', - "Ecirc;": '\U000000CA', - "Ecy;": '\U0000042D', - "Edot;": '\U00000116', - "Efr;": '\U0001D508', - "Egrave;": '\U000000C8', - "Element;": '\U00002208', - "Emacr;": '\U00000112', - "EmptySmallSquare;": '\U000025FB', - "EmptyVerySmallSquare;": '\U000025AB', - "Eogon;": '\U00000118', - "Eopf;": '\U0001D53C', - "Epsilon;": '\U00000395', - "Equal;": '\U00002A75', - "EqualTilde;": '\U00002242', - "Equilibrium;": '\U000021CC', - "Escr;": '\U00002130', - "Esim;": '\U00002A73', - "Eta;": '\U00000397', - "Euml;": '\U000000CB', - "Exists;": '\U00002203', - "ExponentialE;": '\U00002147', - "Fcy;": '\U00000424', - "Ffr;": '\U0001D509', - "FilledSmallSquare;": '\U000025FC', - "FilledVerySmallSquare;": '\U000025AA', - "Fopf;": '\U0001D53D', - "ForAll;": '\U00002200', - "Fouriertrf;": '\U00002131', - "Fscr;": '\U00002131', - "GJcy;": '\U00000403', - "GT;": '\U0000003E', - "Gamma;": '\U00000393', - "Gammad;": '\U000003DC', - "Gbreve;": '\U0000011E', - "Gcedil;": '\U00000122', - "Gcirc;": '\U0000011C', - "Gcy;": '\U00000413', - "Gdot;": '\U00000120', - "Gfr;": '\U0001D50A', - "Gg;": '\U000022D9', - "Gopf;": '\U0001D53E', - "GreaterEqual;": '\U00002265', - "GreaterEqualLess;": '\U000022DB', - "GreaterFullEqual;": '\U00002267', - "GreaterGreater;": '\U00002AA2', - "GreaterLess;": '\U00002277', - "GreaterSlantEqual;": '\U00002A7E', - "GreaterTilde;": '\U00002273', - "Gscr;": '\U0001D4A2', - "Gt;": '\U0000226B', - "HARDcy;": '\U0000042A', - "Hacek;": '\U000002C7', - "Hat;": '\U0000005E', - "Hcirc;": '\U00000124', - "Hfr;": '\U0000210C', - "HilbertSpace;": '\U0000210B', - "Hopf;": '\U0000210D', - "HorizontalLine;": '\U00002500', - "Hscr;": '\U0000210B', - "Hstrok;": '\U00000126', - "HumpDownHump;": '\U0000224E', - "HumpEqual;": '\U0000224F', - "IEcy;": '\U00000415', - "IJlig;": '\U00000132', - "IOcy;": '\U00000401', - "Iacute;": '\U000000CD', - "Icirc;": '\U000000CE', - "Icy;": '\U00000418', - "Idot;": '\U00000130', - "Ifr;": '\U00002111', - "Igrave;": '\U000000CC', - "Im;": '\U00002111', - "Imacr;": '\U0000012A', - "ImaginaryI;": '\U00002148', - "Implies;": '\U000021D2', - "Int;": '\U0000222C', - "Integral;": '\U0000222B', - "Intersection;": '\U000022C2', - "InvisibleComma;": '\U00002063', - "InvisibleTimes;": '\U00002062', - "Iogon;": '\U0000012E', - "Iopf;": '\U0001D540', - "Iota;": '\U00000399', - "Iscr;": '\U00002110', - "Itilde;": '\U00000128', - "Iukcy;": '\U00000406', - "Iuml;": '\U000000CF', - "Jcirc;": '\U00000134', - "Jcy;": '\U00000419', - "Jfr;": '\U0001D50D', - "Jopf;": '\U0001D541', - "Jscr;": '\U0001D4A5', - "Jsercy;": '\U00000408', - "Jukcy;": '\U00000404', - "KHcy;": '\U00000425', - "KJcy;": '\U0000040C', - "Kappa;": '\U0000039A', - "Kcedil;": '\U00000136', - "Kcy;": '\U0000041A', - "Kfr;": '\U0001D50E', - "Kopf;": '\U0001D542', - "Kscr;": '\U0001D4A6', - "LJcy;": '\U00000409', - "LT;": '\U0000003C', - "Lacute;": '\U00000139', - "Lambda;": '\U0000039B', - "Lang;": '\U000027EA', - "Laplacetrf;": '\U00002112', - "Larr;": '\U0000219E', - "Lcaron;": '\U0000013D', - "Lcedil;": '\U0000013B', - "Lcy;": '\U0000041B', - "LeftAngleBracket;": '\U000027E8', - "LeftArrow;": 
'\U00002190', - "LeftArrowBar;": '\U000021E4', - "LeftArrowRightArrow;": '\U000021C6', - "LeftCeiling;": '\U00002308', - "LeftDoubleBracket;": '\U000027E6', - "LeftDownTeeVector;": '\U00002961', - "LeftDownVector;": '\U000021C3', - "LeftDownVectorBar;": '\U00002959', - "LeftFloor;": '\U0000230A', - "LeftRightArrow;": '\U00002194', - "LeftRightVector;": '\U0000294E', - "LeftTee;": '\U000022A3', - "LeftTeeArrow;": '\U000021A4', - "LeftTeeVector;": '\U0000295A', - "LeftTriangle;": '\U000022B2', - "LeftTriangleBar;": '\U000029CF', - "LeftTriangleEqual;": '\U000022B4', - "LeftUpDownVector;": '\U00002951', - "LeftUpTeeVector;": '\U00002960', - "LeftUpVector;": '\U000021BF', - "LeftUpVectorBar;": '\U00002958', - "LeftVector;": '\U000021BC', - "LeftVectorBar;": '\U00002952', - "Leftarrow;": '\U000021D0', - "Leftrightarrow;": '\U000021D4', - "LessEqualGreater;": '\U000022DA', - "LessFullEqual;": '\U00002266', - "LessGreater;": '\U00002276', - "LessLess;": '\U00002AA1', - "LessSlantEqual;": '\U00002A7D', - "LessTilde;": '\U00002272', - "Lfr;": '\U0001D50F', - "Ll;": '\U000022D8', - "Lleftarrow;": '\U000021DA', - "Lmidot;": '\U0000013F', - "LongLeftArrow;": '\U000027F5', - "LongLeftRightArrow;": '\U000027F7', - "LongRightArrow;": '\U000027F6', - "Longleftarrow;": '\U000027F8', - "Longleftrightarrow;": '\U000027FA', - "Longrightarrow;": '\U000027F9', - "Lopf;": '\U0001D543', - "LowerLeftArrow;": '\U00002199', - "LowerRightArrow;": '\U00002198', - "Lscr;": '\U00002112', - "Lsh;": '\U000021B0', - "Lstrok;": '\U00000141', - "Lt;": '\U0000226A', - "Map;": '\U00002905', - "Mcy;": '\U0000041C', - "MediumSpace;": '\U0000205F', - "Mellintrf;": '\U00002133', - "Mfr;": '\U0001D510', - "MinusPlus;": '\U00002213', - "Mopf;": '\U0001D544', - "Mscr;": '\U00002133', - "Mu;": '\U0000039C', - "NJcy;": '\U0000040A', - "Nacute;": '\U00000143', - "Ncaron;": '\U00000147', - "Ncedil;": '\U00000145', - "Ncy;": '\U0000041D', - "NegativeMediumSpace;": '\U0000200B', - "NegativeThickSpace;": '\U0000200B', - "NegativeThinSpace;": '\U0000200B', - "NegativeVeryThinSpace;": '\U0000200B', - "NestedGreaterGreater;": '\U0000226B', - "NestedLessLess;": '\U0000226A', - "NewLine;": '\U0000000A', - "Nfr;": '\U0001D511', - "NoBreak;": '\U00002060', - "NonBreakingSpace;": '\U000000A0', - "Nopf;": '\U00002115', - "Not;": '\U00002AEC', - "NotCongruent;": '\U00002262', - "NotCupCap;": '\U0000226D', - "NotDoubleVerticalBar;": '\U00002226', - "NotElement;": '\U00002209', - "NotEqual;": '\U00002260', - "NotExists;": '\U00002204', - "NotGreater;": '\U0000226F', - "NotGreaterEqual;": '\U00002271', - "NotGreaterLess;": '\U00002279', - "NotGreaterTilde;": '\U00002275', - "NotLeftTriangle;": '\U000022EA', - "NotLeftTriangleEqual;": '\U000022EC', - "NotLess;": '\U0000226E', - "NotLessEqual;": '\U00002270', - "NotLessGreater;": '\U00002278', - "NotLessTilde;": '\U00002274', - "NotPrecedes;": '\U00002280', - "NotPrecedesSlantEqual;": '\U000022E0', - "NotReverseElement;": '\U0000220C', - "NotRightTriangle;": '\U000022EB', - "NotRightTriangleEqual;": '\U000022ED', - "NotSquareSubsetEqual;": '\U000022E2', - "NotSquareSupersetEqual;": '\U000022E3', - "NotSubsetEqual;": '\U00002288', - "NotSucceeds;": '\U00002281', - "NotSucceedsSlantEqual;": '\U000022E1', - "NotSupersetEqual;": '\U00002289', - "NotTilde;": '\U00002241', - "NotTildeEqual;": '\U00002244', - "NotTildeFullEqual;": '\U00002247', - "NotTildeTilde;": '\U00002249', - "NotVerticalBar;": '\U00002224', - "Nscr;": '\U0001D4A9', - "Ntilde;": '\U000000D1', - "Nu;": '\U0000039D', - "OElig;": '\U00000152', 
- "Oacute;": '\U000000D3', - "Ocirc;": '\U000000D4', - "Ocy;": '\U0000041E', - "Odblac;": '\U00000150', - "Ofr;": '\U0001D512', - "Ograve;": '\U000000D2', - "Omacr;": '\U0000014C', - "Omega;": '\U000003A9', - "Omicron;": '\U0000039F', - "Oopf;": '\U0001D546', - "OpenCurlyDoubleQuote;": '\U0000201C', - "OpenCurlyQuote;": '\U00002018', - "Or;": '\U00002A54', - "Oscr;": '\U0001D4AA', - "Oslash;": '\U000000D8', - "Otilde;": '\U000000D5', - "Otimes;": '\U00002A37', - "Ouml;": '\U000000D6', - "OverBar;": '\U0000203E', - "OverBrace;": '\U000023DE', - "OverBracket;": '\U000023B4', - "OverParenthesis;": '\U000023DC', - "PartialD;": '\U00002202', - "Pcy;": '\U0000041F', - "Pfr;": '\U0001D513', - "Phi;": '\U000003A6', - "Pi;": '\U000003A0', - "PlusMinus;": '\U000000B1', - "Poincareplane;": '\U0000210C', - "Popf;": '\U00002119', - "Pr;": '\U00002ABB', - "Precedes;": '\U0000227A', - "PrecedesEqual;": '\U00002AAF', - "PrecedesSlantEqual;": '\U0000227C', - "PrecedesTilde;": '\U0000227E', - "Prime;": '\U00002033', - "Product;": '\U0000220F', - "Proportion;": '\U00002237', - "Proportional;": '\U0000221D', - "Pscr;": '\U0001D4AB', - "Psi;": '\U000003A8', - "QUOT;": '\U00000022', - "Qfr;": '\U0001D514', - "Qopf;": '\U0000211A', - "Qscr;": '\U0001D4AC', - "RBarr;": '\U00002910', - "REG;": '\U000000AE', - "Racute;": '\U00000154', - "Rang;": '\U000027EB', - "Rarr;": '\U000021A0', - "Rarrtl;": '\U00002916', - "Rcaron;": '\U00000158', - "Rcedil;": '\U00000156', - "Rcy;": '\U00000420', - "Re;": '\U0000211C', - "ReverseElement;": '\U0000220B', - "ReverseEquilibrium;": '\U000021CB', - "ReverseUpEquilibrium;": '\U0000296F', - "Rfr;": '\U0000211C', - "Rho;": '\U000003A1', - "RightAngleBracket;": '\U000027E9', - "RightArrow;": '\U00002192', - "RightArrowBar;": '\U000021E5', - "RightArrowLeftArrow;": '\U000021C4', - "RightCeiling;": '\U00002309', - "RightDoubleBracket;": '\U000027E7', - "RightDownTeeVector;": '\U0000295D', - "RightDownVector;": '\U000021C2', - "RightDownVectorBar;": '\U00002955', - "RightFloor;": '\U0000230B', - "RightTee;": '\U000022A2', - "RightTeeArrow;": '\U000021A6', - "RightTeeVector;": '\U0000295B', - "RightTriangle;": '\U000022B3', - "RightTriangleBar;": '\U000029D0', - "RightTriangleEqual;": '\U000022B5', - "RightUpDownVector;": '\U0000294F', - "RightUpTeeVector;": '\U0000295C', - "RightUpVector;": '\U000021BE', - "RightUpVectorBar;": '\U00002954', - "RightVector;": '\U000021C0', - "RightVectorBar;": '\U00002953', - "Rightarrow;": '\U000021D2', - "Ropf;": '\U0000211D', - "RoundImplies;": '\U00002970', - "Rrightarrow;": '\U000021DB', - "Rscr;": '\U0000211B', - "Rsh;": '\U000021B1', - "RuleDelayed;": '\U000029F4', - "SHCHcy;": '\U00000429', - "SHcy;": '\U00000428', - "SOFTcy;": '\U0000042C', - "Sacute;": '\U0000015A', - "Sc;": '\U00002ABC', - "Scaron;": '\U00000160', - "Scedil;": '\U0000015E', - "Scirc;": '\U0000015C', - "Scy;": '\U00000421', - "Sfr;": '\U0001D516', - "ShortDownArrow;": '\U00002193', - "ShortLeftArrow;": '\U00002190', - "ShortRightArrow;": '\U00002192', - "ShortUpArrow;": '\U00002191', - "Sigma;": '\U000003A3', - "SmallCircle;": '\U00002218', - "Sopf;": '\U0001D54A', - "Sqrt;": '\U0000221A', - "Square;": '\U000025A1', - "SquareIntersection;": '\U00002293', - "SquareSubset;": '\U0000228F', - "SquareSubsetEqual;": '\U00002291', - "SquareSuperset;": '\U00002290', - "SquareSupersetEqual;": '\U00002292', - "SquareUnion;": '\U00002294', - "Sscr;": '\U0001D4AE', - "Star;": '\U000022C6', - "Sub;": '\U000022D0', - "Subset;": '\U000022D0', - "SubsetEqual;": '\U00002286', - "Succeeds;": 
'\U0000227B', - "SucceedsEqual;": '\U00002AB0', - "SucceedsSlantEqual;": '\U0000227D', - "SucceedsTilde;": '\U0000227F', - "SuchThat;": '\U0000220B', - "Sum;": '\U00002211', - "Sup;": '\U000022D1', - "Superset;": '\U00002283', - "SupersetEqual;": '\U00002287', - "Supset;": '\U000022D1', - "THORN;": '\U000000DE', - "TRADE;": '\U00002122', - "TSHcy;": '\U0000040B', - "TScy;": '\U00000426', - "Tab;": '\U00000009', - "Tau;": '\U000003A4', - "Tcaron;": '\U00000164', - "Tcedil;": '\U00000162', - "Tcy;": '\U00000422', - "Tfr;": '\U0001D517', - "Therefore;": '\U00002234', - "Theta;": '\U00000398', - "ThinSpace;": '\U00002009', - "Tilde;": '\U0000223C', - "TildeEqual;": '\U00002243', - "TildeFullEqual;": '\U00002245', - "TildeTilde;": '\U00002248', - "Topf;": '\U0001D54B', - "TripleDot;": '\U000020DB', - "Tscr;": '\U0001D4AF', - "Tstrok;": '\U00000166', - "Uacute;": '\U000000DA', - "Uarr;": '\U0000219F', - "Uarrocir;": '\U00002949', - "Ubrcy;": '\U0000040E', - "Ubreve;": '\U0000016C', - "Ucirc;": '\U000000DB', - "Ucy;": '\U00000423', - "Udblac;": '\U00000170', - "Ufr;": '\U0001D518', - "Ugrave;": '\U000000D9', - "Umacr;": '\U0000016A', - "UnderBar;": '\U0000005F', - "UnderBrace;": '\U000023DF', - "UnderBracket;": '\U000023B5', - "UnderParenthesis;": '\U000023DD', - "Union;": '\U000022C3', - "UnionPlus;": '\U0000228E', - "Uogon;": '\U00000172', - "Uopf;": '\U0001D54C', - "UpArrow;": '\U00002191', - "UpArrowBar;": '\U00002912', - "UpArrowDownArrow;": '\U000021C5', - "UpDownArrow;": '\U00002195', - "UpEquilibrium;": '\U0000296E', - "UpTee;": '\U000022A5', - "UpTeeArrow;": '\U000021A5', - "Uparrow;": '\U000021D1', - "Updownarrow;": '\U000021D5', - "UpperLeftArrow;": '\U00002196', - "UpperRightArrow;": '\U00002197', - "Upsi;": '\U000003D2', - "Upsilon;": '\U000003A5', - "Uring;": '\U0000016E', - "Uscr;": '\U0001D4B0', - "Utilde;": '\U00000168', - "Uuml;": '\U000000DC', - "VDash;": '\U000022AB', - "Vbar;": '\U00002AEB', - "Vcy;": '\U00000412', - "Vdash;": '\U000022A9', - "Vdashl;": '\U00002AE6', - "Vee;": '\U000022C1', - "Verbar;": '\U00002016', - "Vert;": '\U00002016', - "VerticalBar;": '\U00002223', - "VerticalLine;": '\U0000007C', - "VerticalSeparator;": '\U00002758', - "VerticalTilde;": '\U00002240', - "VeryThinSpace;": '\U0000200A', - "Vfr;": '\U0001D519', - "Vopf;": '\U0001D54D', - "Vscr;": '\U0001D4B1', - "Vvdash;": '\U000022AA', - "Wcirc;": '\U00000174', - "Wedge;": '\U000022C0', - "Wfr;": '\U0001D51A', - "Wopf;": '\U0001D54E', - "Wscr;": '\U0001D4B2', - "Xfr;": '\U0001D51B', - "Xi;": '\U0000039E', - "Xopf;": '\U0001D54F', - "Xscr;": '\U0001D4B3', - "YAcy;": '\U0000042F', - "YIcy;": '\U00000407', - "YUcy;": '\U0000042E', - "Yacute;": '\U000000DD', - "Ycirc;": '\U00000176', - "Ycy;": '\U0000042B', - "Yfr;": '\U0001D51C', - "Yopf;": '\U0001D550', - "Yscr;": '\U0001D4B4', - "Yuml;": '\U00000178', - "ZHcy;": '\U00000416', - "Zacute;": '\U00000179', - "Zcaron;": '\U0000017D', - "Zcy;": '\U00000417', - "Zdot;": '\U0000017B', - "ZeroWidthSpace;": '\U0000200B', - "Zeta;": '\U00000396', - "Zfr;": '\U00002128', - "Zopf;": '\U00002124', - "Zscr;": '\U0001D4B5', - "aacute;": '\U000000E1', - "abreve;": '\U00000103', - "ac;": '\U0000223E', - "acd;": '\U0000223F', - "acirc;": '\U000000E2', - "acute;": '\U000000B4', - "acy;": '\U00000430', - "aelig;": '\U000000E6', - "af;": '\U00002061', - "afr;": '\U0001D51E', - "agrave;": '\U000000E0', - "alefsym;": '\U00002135', - "aleph;": '\U00002135', - "alpha;": '\U000003B1', - "amacr;": '\U00000101', - "amalg;": '\U00002A3F', - "amp;": '\U00000026', - "and;": 
'\U00002227', - "andand;": '\U00002A55', - "andd;": '\U00002A5C', - "andslope;": '\U00002A58', - "andv;": '\U00002A5A', - "ang;": '\U00002220', - "ange;": '\U000029A4', - "angle;": '\U00002220', - "angmsd;": '\U00002221', - "angmsdaa;": '\U000029A8', - "angmsdab;": '\U000029A9', - "angmsdac;": '\U000029AA', - "angmsdad;": '\U000029AB', - "angmsdae;": '\U000029AC', - "angmsdaf;": '\U000029AD', - "angmsdag;": '\U000029AE', - "angmsdah;": '\U000029AF', - "angrt;": '\U0000221F', - "angrtvb;": '\U000022BE', - "angrtvbd;": '\U0000299D', - "angsph;": '\U00002222', - "angst;": '\U000000C5', - "angzarr;": '\U0000237C', - "aogon;": '\U00000105', - "aopf;": '\U0001D552', - "ap;": '\U00002248', - "apE;": '\U00002A70', - "apacir;": '\U00002A6F', - "ape;": '\U0000224A', - "apid;": '\U0000224B', - "apos;": '\U00000027', - "approx;": '\U00002248', - "approxeq;": '\U0000224A', - "aring;": '\U000000E5', - "ascr;": '\U0001D4B6', - "ast;": '\U0000002A', - "asymp;": '\U00002248', - "asympeq;": '\U0000224D', - "atilde;": '\U000000E3', - "auml;": '\U000000E4', - "awconint;": '\U00002233', - "awint;": '\U00002A11', - "bNot;": '\U00002AED', - "backcong;": '\U0000224C', - "backepsilon;": '\U000003F6', - "backprime;": '\U00002035', - "backsim;": '\U0000223D', - "backsimeq;": '\U000022CD', - "barvee;": '\U000022BD', - "barwed;": '\U00002305', - "barwedge;": '\U00002305', - "bbrk;": '\U000023B5', - "bbrktbrk;": '\U000023B6', - "bcong;": '\U0000224C', - "bcy;": '\U00000431', - "bdquo;": '\U0000201E', - "becaus;": '\U00002235', - "because;": '\U00002235', - "bemptyv;": '\U000029B0', - "bepsi;": '\U000003F6', - "bernou;": '\U0000212C', - "beta;": '\U000003B2', - "beth;": '\U00002136', - "between;": '\U0000226C', - "bfr;": '\U0001D51F', - "bigcap;": '\U000022C2', - "bigcirc;": '\U000025EF', - "bigcup;": '\U000022C3', - "bigodot;": '\U00002A00', - "bigoplus;": '\U00002A01', - "bigotimes;": '\U00002A02', - "bigsqcup;": '\U00002A06', - "bigstar;": '\U00002605', - "bigtriangledown;": '\U000025BD', - "bigtriangleup;": '\U000025B3', - "biguplus;": '\U00002A04', - "bigvee;": '\U000022C1', - "bigwedge;": '\U000022C0', - "bkarow;": '\U0000290D', - "blacklozenge;": '\U000029EB', - "blacksquare;": '\U000025AA', - "blacktriangle;": '\U000025B4', - "blacktriangledown;": '\U000025BE', - "blacktriangleleft;": '\U000025C2', - "blacktriangleright;": '\U000025B8', - "blank;": '\U00002423', - "blk12;": '\U00002592', - "blk14;": '\U00002591', - "blk34;": '\U00002593', - "block;": '\U00002588', - "bnot;": '\U00002310', - "bopf;": '\U0001D553', - "bot;": '\U000022A5', - "bottom;": '\U000022A5', - "bowtie;": '\U000022C8', - "boxDL;": '\U00002557', - "boxDR;": '\U00002554', - "boxDl;": '\U00002556', - "boxDr;": '\U00002553', - "boxH;": '\U00002550', - "boxHD;": '\U00002566', - "boxHU;": '\U00002569', - "boxHd;": '\U00002564', - "boxHu;": '\U00002567', - "boxUL;": '\U0000255D', - "boxUR;": '\U0000255A', - "boxUl;": '\U0000255C', - "boxUr;": '\U00002559', - "boxV;": '\U00002551', - "boxVH;": '\U0000256C', - "boxVL;": '\U00002563', - "boxVR;": '\U00002560', - "boxVh;": '\U0000256B', - "boxVl;": '\U00002562', - "boxVr;": '\U0000255F', - "boxbox;": '\U000029C9', - "boxdL;": '\U00002555', - "boxdR;": '\U00002552', - "boxdl;": '\U00002510', - "boxdr;": '\U0000250C', - "boxh;": '\U00002500', - "boxhD;": '\U00002565', - "boxhU;": '\U00002568', - "boxhd;": '\U0000252C', - "boxhu;": '\U00002534', - "boxminus;": '\U0000229F', - "boxplus;": '\U0000229E', - "boxtimes;": '\U000022A0', - "boxuL;": '\U0000255B', - "boxuR;": '\U00002558', - "boxul;": 
'\U00002518', - "boxur;": '\U00002514', - "boxv;": '\U00002502', - "boxvH;": '\U0000256A', - "boxvL;": '\U00002561', - "boxvR;": '\U0000255E', - "boxvh;": '\U0000253C', - "boxvl;": '\U00002524', - "boxvr;": '\U0000251C', - "bprime;": '\U00002035', - "breve;": '\U000002D8', - "brvbar;": '\U000000A6', - "bscr;": '\U0001D4B7', - "bsemi;": '\U0000204F', - "bsim;": '\U0000223D', - "bsime;": '\U000022CD', - "bsol;": '\U0000005C', - "bsolb;": '\U000029C5', - "bsolhsub;": '\U000027C8', - "bull;": '\U00002022', - "bullet;": '\U00002022', - "bump;": '\U0000224E', - "bumpE;": '\U00002AAE', - "bumpe;": '\U0000224F', - "bumpeq;": '\U0000224F', - "cacute;": '\U00000107', - "cap;": '\U00002229', - "capand;": '\U00002A44', - "capbrcup;": '\U00002A49', - "capcap;": '\U00002A4B', - "capcup;": '\U00002A47', - "capdot;": '\U00002A40', - "caret;": '\U00002041', - "caron;": '\U000002C7', - "ccaps;": '\U00002A4D', - "ccaron;": '\U0000010D', - "ccedil;": '\U000000E7', - "ccirc;": '\U00000109', - "ccups;": '\U00002A4C', - "ccupssm;": '\U00002A50', - "cdot;": '\U0000010B', - "cedil;": '\U000000B8', - "cemptyv;": '\U000029B2', - "cent;": '\U000000A2', - "centerdot;": '\U000000B7', - "cfr;": '\U0001D520', - "chcy;": '\U00000447', - "check;": '\U00002713', - "checkmark;": '\U00002713', - "chi;": '\U000003C7', - "cir;": '\U000025CB', - "cirE;": '\U000029C3', - "circ;": '\U000002C6', - "circeq;": '\U00002257', - "circlearrowleft;": '\U000021BA', - "circlearrowright;": '\U000021BB', - "circledR;": '\U000000AE', - "circledS;": '\U000024C8', - "circledast;": '\U0000229B', - "circledcirc;": '\U0000229A', - "circleddash;": '\U0000229D', - "cire;": '\U00002257', - "cirfnint;": '\U00002A10', - "cirmid;": '\U00002AEF', - "cirscir;": '\U000029C2', - "clubs;": '\U00002663', - "clubsuit;": '\U00002663', - "colon;": '\U0000003A', - "colone;": '\U00002254', - "coloneq;": '\U00002254', - "comma;": '\U0000002C', - "commat;": '\U00000040', - "comp;": '\U00002201', - "compfn;": '\U00002218', - "complement;": '\U00002201', - "complexes;": '\U00002102', - "cong;": '\U00002245', - "congdot;": '\U00002A6D', - "conint;": '\U0000222E', - "copf;": '\U0001D554', - "coprod;": '\U00002210', - "copy;": '\U000000A9', - "copysr;": '\U00002117', - "crarr;": '\U000021B5', - "cross;": '\U00002717', - "cscr;": '\U0001D4B8', - "csub;": '\U00002ACF', - "csube;": '\U00002AD1', - "csup;": '\U00002AD0', - "csupe;": '\U00002AD2', - "ctdot;": '\U000022EF', - "cudarrl;": '\U00002938', - "cudarrr;": '\U00002935', - "cuepr;": '\U000022DE', - "cuesc;": '\U000022DF', - "cularr;": '\U000021B6', - "cularrp;": '\U0000293D', - "cup;": '\U0000222A', - "cupbrcap;": '\U00002A48', - "cupcap;": '\U00002A46', - "cupcup;": '\U00002A4A', - "cupdot;": '\U0000228D', - "cupor;": '\U00002A45', - "curarr;": '\U000021B7', - "curarrm;": '\U0000293C', - "curlyeqprec;": '\U000022DE', - "curlyeqsucc;": '\U000022DF', - "curlyvee;": '\U000022CE', - "curlywedge;": '\U000022CF', - "curren;": '\U000000A4', - "curvearrowleft;": '\U000021B6', - "curvearrowright;": '\U000021B7', - "cuvee;": '\U000022CE', - "cuwed;": '\U000022CF', - "cwconint;": '\U00002232', - "cwint;": '\U00002231', - "cylcty;": '\U0000232D', - "dArr;": '\U000021D3', - "dHar;": '\U00002965', - "dagger;": '\U00002020', - "daleth;": '\U00002138', - "darr;": '\U00002193', - "dash;": '\U00002010', - "dashv;": '\U000022A3', - "dbkarow;": '\U0000290F', - "dblac;": '\U000002DD', - "dcaron;": '\U0000010F', - "dcy;": '\U00000434', - "dd;": '\U00002146', - "ddagger;": '\U00002021', - "ddarr;": '\U000021CA', - "ddotseq;": '\U00002A77', - 
"deg;": '\U000000B0', - "delta;": '\U000003B4', - "demptyv;": '\U000029B1', - "dfisht;": '\U0000297F', - "dfr;": '\U0001D521', - "dharl;": '\U000021C3', - "dharr;": '\U000021C2', - "diam;": '\U000022C4', - "diamond;": '\U000022C4', - "diamondsuit;": '\U00002666', - "diams;": '\U00002666', - "die;": '\U000000A8', - "digamma;": '\U000003DD', - "disin;": '\U000022F2', - "div;": '\U000000F7', - "divide;": '\U000000F7', - "divideontimes;": '\U000022C7', - "divonx;": '\U000022C7', - "djcy;": '\U00000452', - "dlcorn;": '\U0000231E', - "dlcrop;": '\U0000230D', - "dollar;": '\U00000024', - "dopf;": '\U0001D555', - "dot;": '\U000002D9', - "doteq;": '\U00002250', - "doteqdot;": '\U00002251', - "dotminus;": '\U00002238', - "dotplus;": '\U00002214', - "dotsquare;": '\U000022A1', - "doublebarwedge;": '\U00002306', - "downarrow;": '\U00002193', - "downdownarrows;": '\U000021CA', - "downharpoonleft;": '\U000021C3', - "downharpoonright;": '\U000021C2', - "drbkarow;": '\U00002910', - "drcorn;": '\U0000231F', - "drcrop;": '\U0000230C', - "dscr;": '\U0001D4B9', - "dscy;": '\U00000455', - "dsol;": '\U000029F6', - "dstrok;": '\U00000111', - "dtdot;": '\U000022F1', - "dtri;": '\U000025BF', - "dtrif;": '\U000025BE', - "duarr;": '\U000021F5', - "duhar;": '\U0000296F', - "dwangle;": '\U000029A6', - "dzcy;": '\U0000045F', - "dzigrarr;": '\U000027FF', - "eDDot;": '\U00002A77', - "eDot;": '\U00002251', - "eacute;": '\U000000E9', - "easter;": '\U00002A6E', - "ecaron;": '\U0000011B', - "ecir;": '\U00002256', - "ecirc;": '\U000000EA', - "ecolon;": '\U00002255', - "ecy;": '\U0000044D', - "edot;": '\U00000117', - "ee;": '\U00002147', - "efDot;": '\U00002252', - "efr;": '\U0001D522', - "eg;": '\U00002A9A', - "egrave;": '\U000000E8', - "egs;": '\U00002A96', - "egsdot;": '\U00002A98', - "el;": '\U00002A99', - "elinters;": '\U000023E7', - "ell;": '\U00002113', - "els;": '\U00002A95', - "elsdot;": '\U00002A97', - "emacr;": '\U00000113', - "empty;": '\U00002205', - "emptyset;": '\U00002205', - "emptyv;": '\U00002205', - "emsp;": '\U00002003', - "emsp13;": '\U00002004', - "emsp14;": '\U00002005', - "eng;": '\U0000014B', - "ensp;": '\U00002002', - "eogon;": '\U00000119', - "eopf;": '\U0001D556', - "epar;": '\U000022D5', - "eparsl;": '\U000029E3', - "eplus;": '\U00002A71', - "epsi;": '\U000003B5', - "epsilon;": '\U000003B5', - "epsiv;": '\U000003F5', - "eqcirc;": '\U00002256', - "eqcolon;": '\U00002255', - "eqsim;": '\U00002242', - "eqslantgtr;": '\U00002A96', - "eqslantless;": '\U00002A95', - "equals;": '\U0000003D', - "equest;": '\U0000225F', - "equiv;": '\U00002261', - "equivDD;": '\U00002A78', - "eqvparsl;": '\U000029E5', - "erDot;": '\U00002253', - "erarr;": '\U00002971', - "escr;": '\U0000212F', - "esdot;": '\U00002250', - "esim;": '\U00002242', - "eta;": '\U000003B7', - "eth;": '\U000000F0', - "euml;": '\U000000EB', - "euro;": '\U000020AC', - "excl;": '\U00000021', - "exist;": '\U00002203', - "expectation;": '\U00002130', - "exponentiale;": '\U00002147', - "fallingdotseq;": '\U00002252', - "fcy;": '\U00000444', - "female;": '\U00002640', - "ffilig;": '\U0000FB03', - "fflig;": '\U0000FB00', - "ffllig;": '\U0000FB04', - "ffr;": '\U0001D523', - "filig;": '\U0000FB01', - "flat;": '\U0000266D', - "fllig;": '\U0000FB02', - "fltns;": '\U000025B1', - "fnof;": '\U00000192', - "fopf;": '\U0001D557', - "forall;": '\U00002200', - "fork;": '\U000022D4', - "forkv;": '\U00002AD9', - "fpartint;": '\U00002A0D', - "frac12;": '\U000000BD', - "frac13;": '\U00002153', - "frac14;": '\U000000BC', - "frac15;": '\U00002155', - "frac16;": 
'\U00002159', - "frac18;": '\U0000215B', - "frac23;": '\U00002154', - "frac25;": '\U00002156', - "frac34;": '\U000000BE', - "frac35;": '\U00002157', - "frac38;": '\U0000215C', - "frac45;": '\U00002158', - "frac56;": '\U0000215A', - "frac58;": '\U0000215D', - "frac78;": '\U0000215E', - "frasl;": '\U00002044', - "frown;": '\U00002322', - "fscr;": '\U0001D4BB', - "gE;": '\U00002267', - "gEl;": '\U00002A8C', - "gacute;": '\U000001F5', - "gamma;": '\U000003B3', - "gammad;": '\U000003DD', - "gap;": '\U00002A86', - "gbreve;": '\U0000011F', - "gcirc;": '\U0000011D', - "gcy;": '\U00000433', - "gdot;": '\U00000121', - "ge;": '\U00002265', - "gel;": '\U000022DB', - "geq;": '\U00002265', - "geqq;": '\U00002267', - "geqslant;": '\U00002A7E', - "ges;": '\U00002A7E', - "gescc;": '\U00002AA9', - "gesdot;": '\U00002A80', - "gesdoto;": '\U00002A82', - "gesdotol;": '\U00002A84', - "gesles;": '\U00002A94', - "gfr;": '\U0001D524', - "gg;": '\U0000226B', - "ggg;": '\U000022D9', - "gimel;": '\U00002137', - "gjcy;": '\U00000453', - "gl;": '\U00002277', - "glE;": '\U00002A92', - "gla;": '\U00002AA5', - "glj;": '\U00002AA4', - "gnE;": '\U00002269', - "gnap;": '\U00002A8A', - "gnapprox;": '\U00002A8A', - "gne;": '\U00002A88', - "gneq;": '\U00002A88', - "gneqq;": '\U00002269', - "gnsim;": '\U000022E7', - "gopf;": '\U0001D558', - "grave;": '\U00000060', - "gscr;": '\U0000210A', - "gsim;": '\U00002273', - "gsime;": '\U00002A8E', - "gsiml;": '\U00002A90', - "gt;": '\U0000003E', - "gtcc;": '\U00002AA7', - "gtcir;": '\U00002A7A', - "gtdot;": '\U000022D7', - "gtlPar;": '\U00002995', - "gtquest;": '\U00002A7C', - "gtrapprox;": '\U00002A86', - "gtrarr;": '\U00002978', - "gtrdot;": '\U000022D7', - "gtreqless;": '\U000022DB', - "gtreqqless;": '\U00002A8C', - "gtrless;": '\U00002277', - "gtrsim;": '\U00002273', - "hArr;": '\U000021D4', - "hairsp;": '\U0000200A', - "half;": '\U000000BD', - "hamilt;": '\U0000210B', - "hardcy;": '\U0000044A', - "harr;": '\U00002194', - "harrcir;": '\U00002948', - "harrw;": '\U000021AD', - "hbar;": '\U0000210F', - "hcirc;": '\U00000125', - "hearts;": '\U00002665', - "heartsuit;": '\U00002665', - "hellip;": '\U00002026', - "hercon;": '\U000022B9', - "hfr;": '\U0001D525', - "hksearow;": '\U00002925', - "hkswarow;": '\U00002926', - "hoarr;": '\U000021FF', - "homtht;": '\U0000223B', - "hookleftarrow;": '\U000021A9', - "hookrightarrow;": '\U000021AA', - "hopf;": '\U0001D559', - "horbar;": '\U00002015', - "hscr;": '\U0001D4BD', - "hslash;": '\U0000210F', - "hstrok;": '\U00000127', - "hybull;": '\U00002043', - "hyphen;": '\U00002010', - "iacute;": '\U000000ED', - "ic;": '\U00002063', - "icirc;": '\U000000EE', - "icy;": '\U00000438', - "iecy;": '\U00000435', - "iexcl;": '\U000000A1', - "iff;": '\U000021D4', - "ifr;": '\U0001D526', - "igrave;": '\U000000EC', - "ii;": '\U00002148', - "iiiint;": '\U00002A0C', - "iiint;": '\U0000222D', - "iinfin;": '\U000029DC', - "iiota;": '\U00002129', - "ijlig;": '\U00000133', - "imacr;": '\U0000012B', - "image;": '\U00002111', - "imagline;": '\U00002110', - "imagpart;": '\U00002111', - "imath;": '\U00000131', - "imof;": '\U000022B7', - "imped;": '\U000001B5', - "in;": '\U00002208', - "incare;": '\U00002105', - "infin;": '\U0000221E', - "infintie;": '\U000029DD', - "inodot;": '\U00000131', - "int;": '\U0000222B', - "intcal;": '\U000022BA', - "integers;": '\U00002124', - "intercal;": '\U000022BA', - "intlarhk;": '\U00002A17', - "intprod;": '\U00002A3C', - "iocy;": '\U00000451', - "iogon;": '\U0000012F', - "iopf;": '\U0001D55A', - "iota;": '\U000003B9', - "iprod;": 
'\U00002A3C', - "iquest;": '\U000000BF', - "iscr;": '\U0001D4BE', - "isin;": '\U00002208', - "isinE;": '\U000022F9', - "isindot;": '\U000022F5', - "isins;": '\U000022F4', - "isinsv;": '\U000022F3', - "isinv;": '\U00002208', - "it;": '\U00002062', - "itilde;": '\U00000129', - "iukcy;": '\U00000456', - "iuml;": '\U000000EF', - "jcirc;": '\U00000135', - "jcy;": '\U00000439', - "jfr;": '\U0001D527', - "jmath;": '\U00000237', - "jopf;": '\U0001D55B', - "jscr;": '\U0001D4BF', - "jsercy;": '\U00000458', - "jukcy;": '\U00000454', - "kappa;": '\U000003BA', - "kappav;": '\U000003F0', - "kcedil;": '\U00000137', - "kcy;": '\U0000043A', - "kfr;": '\U0001D528', - "kgreen;": '\U00000138', - "khcy;": '\U00000445', - "kjcy;": '\U0000045C', - "kopf;": '\U0001D55C', - "kscr;": '\U0001D4C0', - "lAarr;": '\U000021DA', - "lArr;": '\U000021D0', - "lAtail;": '\U0000291B', - "lBarr;": '\U0000290E', - "lE;": '\U00002266', - "lEg;": '\U00002A8B', - "lHar;": '\U00002962', - "lacute;": '\U0000013A', - "laemptyv;": '\U000029B4', - "lagran;": '\U00002112', - "lambda;": '\U000003BB', - "lang;": '\U000027E8', - "langd;": '\U00002991', - "langle;": '\U000027E8', - "lap;": '\U00002A85', - "laquo;": '\U000000AB', - "larr;": '\U00002190', - "larrb;": '\U000021E4', - "larrbfs;": '\U0000291F', - "larrfs;": '\U0000291D', - "larrhk;": '\U000021A9', - "larrlp;": '\U000021AB', - "larrpl;": '\U00002939', - "larrsim;": '\U00002973', - "larrtl;": '\U000021A2', - "lat;": '\U00002AAB', - "latail;": '\U00002919', - "late;": '\U00002AAD', - "lbarr;": '\U0000290C', - "lbbrk;": '\U00002772', - "lbrace;": '\U0000007B', - "lbrack;": '\U0000005B', - "lbrke;": '\U0000298B', - "lbrksld;": '\U0000298F', - "lbrkslu;": '\U0000298D', - "lcaron;": '\U0000013E', - "lcedil;": '\U0000013C', - "lceil;": '\U00002308', - "lcub;": '\U0000007B', - "lcy;": '\U0000043B', - "ldca;": '\U00002936', - "ldquo;": '\U0000201C', - "ldquor;": '\U0000201E', - "ldrdhar;": '\U00002967', - "ldrushar;": '\U0000294B', - "ldsh;": '\U000021B2', - "le;": '\U00002264', - "leftarrow;": '\U00002190', - "leftarrowtail;": '\U000021A2', - "leftharpoondown;": '\U000021BD', - "leftharpoonup;": '\U000021BC', - "leftleftarrows;": '\U000021C7', - "leftrightarrow;": '\U00002194', - "leftrightarrows;": '\U000021C6', - "leftrightharpoons;": '\U000021CB', - "leftrightsquigarrow;": '\U000021AD', - "leftthreetimes;": '\U000022CB', - "leg;": '\U000022DA', - "leq;": '\U00002264', - "leqq;": '\U00002266', - "leqslant;": '\U00002A7D', - "les;": '\U00002A7D', - "lescc;": '\U00002AA8', - "lesdot;": '\U00002A7F', - "lesdoto;": '\U00002A81', - "lesdotor;": '\U00002A83', - "lesges;": '\U00002A93', - "lessapprox;": '\U00002A85', - "lessdot;": '\U000022D6', - "lesseqgtr;": '\U000022DA', - "lesseqqgtr;": '\U00002A8B', - "lessgtr;": '\U00002276', - "lesssim;": '\U00002272', - "lfisht;": '\U0000297C', - "lfloor;": '\U0000230A', - "lfr;": '\U0001D529', - "lg;": '\U00002276', - "lgE;": '\U00002A91', - "lhard;": '\U000021BD', - "lharu;": '\U000021BC', - "lharul;": '\U0000296A', - "lhblk;": '\U00002584', - "ljcy;": '\U00000459', - "ll;": '\U0000226A', - "llarr;": '\U000021C7', - "llcorner;": '\U0000231E', - "llhard;": '\U0000296B', - "lltri;": '\U000025FA', - "lmidot;": '\U00000140', - "lmoust;": '\U000023B0', - "lmoustache;": '\U000023B0', - "lnE;": '\U00002268', - "lnap;": '\U00002A89', - "lnapprox;": '\U00002A89', - "lne;": '\U00002A87', - "lneq;": '\U00002A87', - "lneqq;": '\U00002268', - "lnsim;": '\U000022E6', - "loang;": '\U000027EC', - "loarr;": '\U000021FD', - "lobrk;": '\U000027E6', - "longleftarrow;": 
'\U000027F5', - "longleftrightarrow;": '\U000027F7', - "longmapsto;": '\U000027FC', - "longrightarrow;": '\U000027F6', - "looparrowleft;": '\U000021AB', - "looparrowright;": '\U000021AC', - "lopar;": '\U00002985', - "lopf;": '\U0001D55D', - "loplus;": '\U00002A2D', - "lotimes;": '\U00002A34', - "lowast;": '\U00002217', - "lowbar;": '\U0000005F', - "loz;": '\U000025CA', - "lozenge;": '\U000025CA', - "lozf;": '\U000029EB', - "lpar;": '\U00000028', - "lparlt;": '\U00002993', - "lrarr;": '\U000021C6', - "lrcorner;": '\U0000231F', - "lrhar;": '\U000021CB', - "lrhard;": '\U0000296D', - "lrm;": '\U0000200E', - "lrtri;": '\U000022BF', - "lsaquo;": '\U00002039', - "lscr;": '\U0001D4C1', - "lsh;": '\U000021B0', - "lsim;": '\U00002272', - "lsime;": '\U00002A8D', - "lsimg;": '\U00002A8F', - "lsqb;": '\U0000005B', - "lsquo;": '\U00002018', - "lsquor;": '\U0000201A', - "lstrok;": '\U00000142', - "lt;": '\U0000003C', - "ltcc;": '\U00002AA6', - "ltcir;": '\U00002A79', - "ltdot;": '\U000022D6', - "lthree;": '\U000022CB', - "ltimes;": '\U000022C9', - "ltlarr;": '\U00002976', - "ltquest;": '\U00002A7B', - "ltrPar;": '\U00002996', - "ltri;": '\U000025C3', - "ltrie;": '\U000022B4', - "ltrif;": '\U000025C2', - "lurdshar;": '\U0000294A', - "luruhar;": '\U00002966', - "mDDot;": '\U0000223A', - "macr;": '\U000000AF', - "male;": '\U00002642', - "malt;": '\U00002720', - "maltese;": '\U00002720', - "map;": '\U000021A6', - "mapsto;": '\U000021A6', - "mapstodown;": '\U000021A7', - "mapstoleft;": '\U000021A4', - "mapstoup;": '\U000021A5', - "marker;": '\U000025AE', - "mcomma;": '\U00002A29', - "mcy;": '\U0000043C', - "mdash;": '\U00002014', - "measuredangle;": '\U00002221', - "mfr;": '\U0001D52A', - "mho;": '\U00002127', - "micro;": '\U000000B5', - "mid;": '\U00002223', - "midast;": '\U0000002A', - "midcir;": '\U00002AF0', - "middot;": '\U000000B7', - "minus;": '\U00002212', - "minusb;": '\U0000229F', - "minusd;": '\U00002238', - "minusdu;": '\U00002A2A', - "mlcp;": '\U00002ADB', - "mldr;": '\U00002026', - "mnplus;": '\U00002213', - "models;": '\U000022A7', - "mopf;": '\U0001D55E', - "mp;": '\U00002213', - "mscr;": '\U0001D4C2', - "mstpos;": '\U0000223E', - "mu;": '\U000003BC', - "multimap;": '\U000022B8', - "mumap;": '\U000022B8', - "nLeftarrow;": '\U000021CD', - "nLeftrightarrow;": '\U000021CE', - "nRightarrow;": '\U000021CF', - "nVDash;": '\U000022AF', - "nVdash;": '\U000022AE', - "nabla;": '\U00002207', - "nacute;": '\U00000144', - "nap;": '\U00002249', - "napos;": '\U00000149', - "napprox;": '\U00002249', - "natur;": '\U0000266E', - "natural;": '\U0000266E', - "naturals;": '\U00002115', - "nbsp;": '\U000000A0', - "ncap;": '\U00002A43', - "ncaron;": '\U00000148', - "ncedil;": '\U00000146', - "ncong;": '\U00002247', - "ncup;": '\U00002A42', - "ncy;": '\U0000043D', - "ndash;": '\U00002013', - "ne;": '\U00002260', - "neArr;": '\U000021D7', - "nearhk;": '\U00002924', - "nearr;": '\U00002197', - "nearrow;": '\U00002197', - "nequiv;": '\U00002262', - "nesear;": '\U00002928', - "nexist;": '\U00002204', - "nexists;": '\U00002204', - "nfr;": '\U0001D52B', - "nge;": '\U00002271', - "ngeq;": '\U00002271', - "ngsim;": '\U00002275', - "ngt;": '\U0000226F', - "ngtr;": '\U0000226F', - "nhArr;": '\U000021CE', - "nharr;": '\U000021AE', - "nhpar;": '\U00002AF2', - "ni;": '\U0000220B', - "nis;": '\U000022FC', - "nisd;": '\U000022FA', - "niv;": '\U0000220B', - "njcy;": '\U0000045A', - "nlArr;": '\U000021CD', - "nlarr;": '\U0000219A', - "nldr;": '\U00002025', - "nle;": '\U00002270', - "nleftarrow;": '\U0000219A', - "nleftrightarrow;": 
'\U000021AE', - "nleq;": '\U00002270', - "nless;": '\U0000226E', - "nlsim;": '\U00002274', - "nlt;": '\U0000226E', - "nltri;": '\U000022EA', - "nltrie;": '\U000022EC', - "nmid;": '\U00002224', - "nopf;": '\U0001D55F', - "not;": '\U000000AC', - "notin;": '\U00002209', - "notinva;": '\U00002209', - "notinvb;": '\U000022F7', - "notinvc;": '\U000022F6', - "notni;": '\U0000220C', - "notniva;": '\U0000220C', - "notnivb;": '\U000022FE', - "notnivc;": '\U000022FD', - "npar;": '\U00002226', - "nparallel;": '\U00002226', - "npolint;": '\U00002A14', - "npr;": '\U00002280', - "nprcue;": '\U000022E0', - "nprec;": '\U00002280', - "nrArr;": '\U000021CF', - "nrarr;": '\U0000219B', - "nrightarrow;": '\U0000219B', - "nrtri;": '\U000022EB', - "nrtrie;": '\U000022ED', - "nsc;": '\U00002281', - "nsccue;": '\U000022E1', - "nscr;": '\U0001D4C3', - "nshortmid;": '\U00002224', - "nshortparallel;": '\U00002226', - "nsim;": '\U00002241', - "nsime;": '\U00002244', - "nsimeq;": '\U00002244', - "nsmid;": '\U00002224', - "nspar;": '\U00002226', - "nsqsube;": '\U000022E2', - "nsqsupe;": '\U000022E3', - "nsub;": '\U00002284', - "nsube;": '\U00002288', - "nsubseteq;": '\U00002288', - "nsucc;": '\U00002281', - "nsup;": '\U00002285', - "nsupe;": '\U00002289', - "nsupseteq;": '\U00002289', - "ntgl;": '\U00002279', - "ntilde;": '\U000000F1', - "ntlg;": '\U00002278', - "ntriangleleft;": '\U000022EA', - "ntrianglelefteq;": '\U000022EC', - "ntriangleright;": '\U000022EB', - "ntrianglerighteq;": '\U000022ED', - "nu;": '\U000003BD', - "num;": '\U00000023', - "numero;": '\U00002116', - "numsp;": '\U00002007', - "nvDash;": '\U000022AD', - "nvHarr;": '\U00002904', - "nvdash;": '\U000022AC', - "nvinfin;": '\U000029DE', - "nvlArr;": '\U00002902', - "nvrArr;": '\U00002903', - "nwArr;": '\U000021D6', - "nwarhk;": '\U00002923', - "nwarr;": '\U00002196', - "nwarrow;": '\U00002196', - "nwnear;": '\U00002927', - "oS;": '\U000024C8', - "oacute;": '\U000000F3', - "oast;": '\U0000229B', - "ocir;": '\U0000229A', - "ocirc;": '\U000000F4', - "ocy;": '\U0000043E', - "odash;": '\U0000229D', - "odblac;": '\U00000151', - "odiv;": '\U00002A38', - "odot;": '\U00002299', - "odsold;": '\U000029BC', - "oelig;": '\U00000153', - "ofcir;": '\U000029BF', - "ofr;": '\U0001D52C', - "ogon;": '\U000002DB', - "ograve;": '\U000000F2', - "ogt;": '\U000029C1', - "ohbar;": '\U000029B5', - "ohm;": '\U000003A9', - "oint;": '\U0000222E', - "olarr;": '\U000021BA', - "olcir;": '\U000029BE', - "olcross;": '\U000029BB', - "oline;": '\U0000203E', - "olt;": '\U000029C0', - "omacr;": '\U0000014D', - "omega;": '\U000003C9', - "omicron;": '\U000003BF', - "omid;": '\U000029B6', - "ominus;": '\U00002296', - "oopf;": '\U0001D560', - "opar;": '\U000029B7', - "operp;": '\U000029B9', - "oplus;": '\U00002295', - "or;": '\U00002228', - "orarr;": '\U000021BB', - "ord;": '\U00002A5D', - "order;": '\U00002134', - "orderof;": '\U00002134', - "ordf;": '\U000000AA', - "ordm;": '\U000000BA', - "origof;": '\U000022B6', - "oror;": '\U00002A56', - "orslope;": '\U00002A57', - "orv;": '\U00002A5B', - "oscr;": '\U00002134', - "oslash;": '\U000000F8', - "osol;": '\U00002298', - "otilde;": '\U000000F5', - "otimes;": '\U00002297', - "otimesas;": '\U00002A36', - "ouml;": '\U000000F6', - "ovbar;": '\U0000233D', - "par;": '\U00002225', - "para;": '\U000000B6', - "parallel;": '\U00002225', - "parsim;": '\U00002AF3', - "parsl;": '\U00002AFD', - "part;": '\U00002202', - "pcy;": '\U0000043F', - "percnt;": '\U00000025', - "period;": '\U0000002E', - "permil;": '\U00002030', - "perp;": '\U000022A5', - "pertenk;": 
'\U00002031', - "pfr;": '\U0001D52D', - "phi;": '\U000003C6', - "phiv;": '\U000003D5', - "phmmat;": '\U00002133', - "phone;": '\U0000260E', - "pi;": '\U000003C0', - "pitchfork;": '\U000022D4', - "piv;": '\U000003D6', - "planck;": '\U0000210F', - "planckh;": '\U0000210E', - "plankv;": '\U0000210F', - "plus;": '\U0000002B', - "plusacir;": '\U00002A23', - "plusb;": '\U0000229E', - "pluscir;": '\U00002A22', - "plusdo;": '\U00002214', - "plusdu;": '\U00002A25', - "pluse;": '\U00002A72', - "plusmn;": '\U000000B1', - "plussim;": '\U00002A26', - "plustwo;": '\U00002A27', - "pm;": '\U000000B1', - "pointint;": '\U00002A15', - "popf;": '\U0001D561', - "pound;": '\U000000A3', - "pr;": '\U0000227A', - "prE;": '\U00002AB3', - "prap;": '\U00002AB7', - "prcue;": '\U0000227C', - "pre;": '\U00002AAF', - "prec;": '\U0000227A', - "precapprox;": '\U00002AB7', - "preccurlyeq;": '\U0000227C', - "preceq;": '\U00002AAF', - "precnapprox;": '\U00002AB9', - "precneqq;": '\U00002AB5', - "precnsim;": '\U000022E8', - "precsim;": '\U0000227E', - "prime;": '\U00002032', - "primes;": '\U00002119', - "prnE;": '\U00002AB5', - "prnap;": '\U00002AB9', - "prnsim;": '\U000022E8', - "prod;": '\U0000220F', - "profalar;": '\U0000232E', - "profline;": '\U00002312', - "profsurf;": '\U00002313', - "prop;": '\U0000221D', - "propto;": '\U0000221D', - "prsim;": '\U0000227E', - "prurel;": '\U000022B0', - "pscr;": '\U0001D4C5', - "psi;": '\U000003C8', - "puncsp;": '\U00002008', - "qfr;": '\U0001D52E', - "qint;": '\U00002A0C', - "qopf;": '\U0001D562', - "qprime;": '\U00002057', - "qscr;": '\U0001D4C6', - "quaternions;": '\U0000210D', - "quatint;": '\U00002A16', - "quest;": '\U0000003F', - "questeq;": '\U0000225F', - "quot;": '\U00000022', - "rAarr;": '\U000021DB', - "rArr;": '\U000021D2', - "rAtail;": '\U0000291C', - "rBarr;": '\U0000290F', - "rHar;": '\U00002964', - "racute;": '\U00000155', - "radic;": '\U0000221A', - "raemptyv;": '\U000029B3', - "rang;": '\U000027E9', - "rangd;": '\U00002992', - "range;": '\U000029A5', - "rangle;": '\U000027E9', - "raquo;": '\U000000BB', - "rarr;": '\U00002192', - "rarrap;": '\U00002975', - "rarrb;": '\U000021E5', - "rarrbfs;": '\U00002920', - "rarrc;": '\U00002933', - "rarrfs;": '\U0000291E', - "rarrhk;": '\U000021AA', - "rarrlp;": '\U000021AC', - "rarrpl;": '\U00002945', - "rarrsim;": '\U00002974', - "rarrtl;": '\U000021A3', - "rarrw;": '\U0000219D', - "ratail;": '\U0000291A', - "ratio;": '\U00002236', - "rationals;": '\U0000211A', - "rbarr;": '\U0000290D', - "rbbrk;": '\U00002773', - "rbrace;": '\U0000007D', - "rbrack;": '\U0000005D', - "rbrke;": '\U0000298C', - "rbrksld;": '\U0000298E', - "rbrkslu;": '\U00002990', - "rcaron;": '\U00000159', - "rcedil;": '\U00000157', - "rceil;": '\U00002309', - "rcub;": '\U0000007D', - "rcy;": '\U00000440', - "rdca;": '\U00002937', - "rdldhar;": '\U00002969', - "rdquo;": '\U0000201D', - "rdquor;": '\U0000201D', - "rdsh;": '\U000021B3', - "real;": '\U0000211C', - "realine;": '\U0000211B', - "realpart;": '\U0000211C', - "reals;": '\U0000211D', - "rect;": '\U000025AD', - "reg;": '\U000000AE', - "rfisht;": '\U0000297D', - "rfloor;": '\U0000230B', - "rfr;": '\U0001D52F', - "rhard;": '\U000021C1', - "rharu;": '\U000021C0', - "rharul;": '\U0000296C', - "rho;": '\U000003C1', - "rhov;": '\U000003F1', - "rightarrow;": '\U00002192', - "rightarrowtail;": '\U000021A3', - "rightharpoondown;": '\U000021C1', - "rightharpoonup;": '\U000021C0', - "rightleftarrows;": '\U000021C4', - "rightleftharpoons;": '\U000021CC', - "rightrightarrows;": '\U000021C9', - "rightsquigarrow;": 
'\U0000219D', - "rightthreetimes;": '\U000022CC', - "ring;": '\U000002DA', - "risingdotseq;": '\U00002253', - "rlarr;": '\U000021C4', - "rlhar;": '\U000021CC', - "rlm;": '\U0000200F', - "rmoust;": '\U000023B1', - "rmoustache;": '\U000023B1', - "rnmid;": '\U00002AEE', - "roang;": '\U000027ED', - "roarr;": '\U000021FE', - "robrk;": '\U000027E7', - "ropar;": '\U00002986', - "ropf;": '\U0001D563', - "roplus;": '\U00002A2E', - "rotimes;": '\U00002A35', - "rpar;": '\U00000029', - "rpargt;": '\U00002994', - "rppolint;": '\U00002A12', - "rrarr;": '\U000021C9', - "rsaquo;": '\U0000203A', - "rscr;": '\U0001D4C7', - "rsh;": '\U000021B1', - "rsqb;": '\U0000005D', - "rsquo;": '\U00002019', - "rsquor;": '\U00002019', - "rthree;": '\U000022CC', - "rtimes;": '\U000022CA', - "rtri;": '\U000025B9', - "rtrie;": '\U000022B5', - "rtrif;": '\U000025B8', - "rtriltri;": '\U000029CE', - "ruluhar;": '\U00002968', - "rx;": '\U0000211E', - "sacute;": '\U0000015B', - "sbquo;": '\U0000201A', - "sc;": '\U0000227B', - "scE;": '\U00002AB4', - "scap;": '\U00002AB8', - "scaron;": '\U00000161', - "sccue;": '\U0000227D', - "sce;": '\U00002AB0', - "scedil;": '\U0000015F', - "scirc;": '\U0000015D', - "scnE;": '\U00002AB6', - "scnap;": '\U00002ABA', - "scnsim;": '\U000022E9', - "scpolint;": '\U00002A13', - "scsim;": '\U0000227F', - "scy;": '\U00000441', - "sdot;": '\U000022C5', - "sdotb;": '\U000022A1', - "sdote;": '\U00002A66', - "seArr;": '\U000021D8', - "searhk;": '\U00002925', - "searr;": '\U00002198', - "searrow;": '\U00002198', - "sect;": '\U000000A7', - "semi;": '\U0000003B', - "seswar;": '\U00002929', - "setminus;": '\U00002216', - "setmn;": '\U00002216', - "sext;": '\U00002736', - "sfr;": '\U0001D530', - "sfrown;": '\U00002322', - "sharp;": '\U0000266F', - "shchcy;": '\U00000449', - "shcy;": '\U00000448', - "shortmid;": '\U00002223', - "shortparallel;": '\U00002225', - "shy;": '\U000000AD', - "sigma;": '\U000003C3', - "sigmaf;": '\U000003C2', - "sigmav;": '\U000003C2', - "sim;": '\U0000223C', - "simdot;": '\U00002A6A', - "sime;": '\U00002243', - "simeq;": '\U00002243', - "simg;": '\U00002A9E', - "simgE;": '\U00002AA0', - "siml;": '\U00002A9D', - "simlE;": '\U00002A9F', - "simne;": '\U00002246', - "simplus;": '\U00002A24', - "simrarr;": '\U00002972', - "slarr;": '\U00002190', - "smallsetminus;": '\U00002216', - "smashp;": '\U00002A33', - "smeparsl;": '\U000029E4', - "smid;": '\U00002223', - "smile;": '\U00002323', - "smt;": '\U00002AAA', - "smte;": '\U00002AAC', - "softcy;": '\U0000044C', - "sol;": '\U0000002F', - "solb;": '\U000029C4', - "solbar;": '\U0000233F', - "sopf;": '\U0001D564', - "spades;": '\U00002660', - "spadesuit;": '\U00002660', - "spar;": '\U00002225', - "sqcap;": '\U00002293', - "sqcup;": '\U00002294', - "sqsub;": '\U0000228F', - "sqsube;": '\U00002291', - "sqsubset;": '\U0000228F', - "sqsubseteq;": '\U00002291', - "sqsup;": '\U00002290', - "sqsupe;": '\U00002292', - "sqsupset;": '\U00002290', - "sqsupseteq;": '\U00002292', - "squ;": '\U000025A1', - "square;": '\U000025A1', - "squarf;": '\U000025AA', - "squf;": '\U000025AA', - "srarr;": '\U00002192', - "sscr;": '\U0001D4C8', - "ssetmn;": '\U00002216', - "ssmile;": '\U00002323', - "sstarf;": '\U000022C6', - "star;": '\U00002606', - "starf;": '\U00002605', - "straightepsilon;": '\U000003F5', - "straightphi;": '\U000003D5', - "strns;": '\U000000AF', - "sub;": '\U00002282', - "subE;": '\U00002AC5', - "subdot;": '\U00002ABD', - "sube;": '\U00002286', - "subedot;": '\U00002AC3', - "submult;": '\U00002AC1', - "subnE;": '\U00002ACB', - "subne;": '\U0000228A', - 
"subplus;": '\U00002ABF', - "subrarr;": '\U00002979', - "subset;": '\U00002282', - "subseteq;": '\U00002286', - "subseteqq;": '\U00002AC5', - "subsetneq;": '\U0000228A', - "subsetneqq;": '\U00002ACB', - "subsim;": '\U00002AC7', - "subsub;": '\U00002AD5', - "subsup;": '\U00002AD3', - "succ;": '\U0000227B', - "succapprox;": '\U00002AB8', - "succcurlyeq;": '\U0000227D', - "succeq;": '\U00002AB0', - "succnapprox;": '\U00002ABA', - "succneqq;": '\U00002AB6', - "succnsim;": '\U000022E9', - "succsim;": '\U0000227F', - "sum;": '\U00002211', - "sung;": '\U0000266A', - "sup;": '\U00002283', - "sup1;": '\U000000B9', - "sup2;": '\U000000B2', - "sup3;": '\U000000B3', - "supE;": '\U00002AC6', - "supdot;": '\U00002ABE', - "supdsub;": '\U00002AD8', - "supe;": '\U00002287', - "supedot;": '\U00002AC4', - "suphsol;": '\U000027C9', - "suphsub;": '\U00002AD7', - "suplarr;": '\U0000297B', - "supmult;": '\U00002AC2', - "supnE;": '\U00002ACC', - "supne;": '\U0000228B', - "supplus;": '\U00002AC0', - "supset;": '\U00002283', - "supseteq;": '\U00002287', - "supseteqq;": '\U00002AC6', - "supsetneq;": '\U0000228B', - "supsetneqq;": '\U00002ACC', - "supsim;": '\U00002AC8', - "supsub;": '\U00002AD4', - "supsup;": '\U00002AD6', - "swArr;": '\U000021D9', - "swarhk;": '\U00002926', - "swarr;": '\U00002199', - "swarrow;": '\U00002199', - "swnwar;": '\U0000292A', - "szlig;": '\U000000DF', - "target;": '\U00002316', - "tau;": '\U000003C4', - "tbrk;": '\U000023B4', - "tcaron;": '\U00000165', - "tcedil;": '\U00000163', - "tcy;": '\U00000442', - "tdot;": '\U000020DB', - "telrec;": '\U00002315', - "tfr;": '\U0001D531', - "there4;": '\U00002234', - "therefore;": '\U00002234', - "theta;": '\U000003B8', - "thetasym;": '\U000003D1', - "thetav;": '\U000003D1', - "thickapprox;": '\U00002248', - "thicksim;": '\U0000223C', - "thinsp;": '\U00002009', - "thkap;": '\U00002248', - "thksim;": '\U0000223C', - "thorn;": '\U000000FE', - "tilde;": '\U000002DC', - "times;": '\U000000D7', - "timesb;": '\U000022A0', - "timesbar;": '\U00002A31', - "timesd;": '\U00002A30', - "tint;": '\U0000222D', - "toea;": '\U00002928', - "top;": '\U000022A4', - "topbot;": '\U00002336', - "topcir;": '\U00002AF1', - "topf;": '\U0001D565', - "topfork;": '\U00002ADA', - "tosa;": '\U00002929', - "tprime;": '\U00002034', - "trade;": '\U00002122', - "triangle;": '\U000025B5', - "triangledown;": '\U000025BF', - "triangleleft;": '\U000025C3', - "trianglelefteq;": '\U000022B4', - "triangleq;": '\U0000225C', - "triangleright;": '\U000025B9', - "trianglerighteq;": '\U000022B5', - "tridot;": '\U000025EC', - "trie;": '\U0000225C', - "triminus;": '\U00002A3A', - "triplus;": '\U00002A39', - "trisb;": '\U000029CD', - "tritime;": '\U00002A3B', - "trpezium;": '\U000023E2', - "tscr;": '\U0001D4C9', - "tscy;": '\U00000446', - "tshcy;": '\U0000045B', - "tstrok;": '\U00000167', - "twixt;": '\U0000226C', - "twoheadleftarrow;": '\U0000219E', - "twoheadrightarrow;": '\U000021A0', - "uArr;": '\U000021D1', - "uHar;": '\U00002963', - "uacute;": '\U000000FA', - "uarr;": '\U00002191', - "ubrcy;": '\U0000045E', - "ubreve;": '\U0000016D', - "ucirc;": '\U000000FB', - "ucy;": '\U00000443', - "udarr;": '\U000021C5', - "udblac;": '\U00000171', - "udhar;": '\U0000296E', - "ufisht;": '\U0000297E', - "ufr;": '\U0001D532', - "ugrave;": '\U000000F9', - "uharl;": '\U000021BF', - "uharr;": '\U000021BE', - "uhblk;": '\U00002580', - "ulcorn;": '\U0000231C', - "ulcorner;": '\U0000231C', - "ulcrop;": '\U0000230F', - "ultri;": '\U000025F8', - "umacr;": '\U0000016B', - "uml;": '\U000000A8', - "uogon;": 
'\U00000173', - "uopf;": '\U0001D566', - "uparrow;": '\U00002191', - "updownarrow;": '\U00002195', - "upharpoonleft;": '\U000021BF', - "upharpoonright;": '\U000021BE', - "uplus;": '\U0000228E', - "upsi;": '\U000003C5', - "upsih;": '\U000003D2', - "upsilon;": '\U000003C5', - "upuparrows;": '\U000021C8', - "urcorn;": '\U0000231D', - "urcorner;": '\U0000231D', - "urcrop;": '\U0000230E', - "uring;": '\U0000016F', - "urtri;": '\U000025F9', - "uscr;": '\U0001D4CA', - "utdot;": '\U000022F0', - "utilde;": '\U00000169', - "utri;": '\U000025B5', - "utrif;": '\U000025B4', - "uuarr;": '\U000021C8', - "uuml;": '\U000000FC', - "uwangle;": '\U000029A7', - "vArr;": '\U000021D5', - "vBar;": '\U00002AE8', - "vBarv;": '\U00002AE9', - "vDash;": '\U000022A8', - "vangrt;": '\U0000299C', - "varepsilon;": '\U000003F5', - "varkappa;": '\U000003F0', - "varnothing;": '\U00002205', - "varphi;": '\U000003D5', - "varpi;": '\U000003D6', - "varpropto;": '\U0000221D', - "varr;": '\U00002195', - "varrho;": '\U000003F1', - "varsigma;": '\U000003C2', - "vartheta;": '\U000003D1', - "vartriangleleft;": '\U000022B2', - "vartriangleright;": '\U000022B3', - "vcy;": '\U00000432', - "vdash;": '\U000022A2', - "vee;": '\U00002228', - "veebar;": '\U000022BB', - "veeeq;": '\U0000225A', - "vellip;": '\U000022EE', - "verbar;": '\U0000007C', - "vert;": '\U0000007C', - "vfr;": '\U0001D533', - "vltri;": '\U000022B2', - "vopf;": '\U0001D567', - "vprop;": '\U0000221D', - "vrtri;": '\U000022B3', - "vscr;": '\U0001D4CB', - "vzigzag;": '\U0000299A', - "wcirc;": '\U00000175', - "wedbar;": '\U00002A5F', - "wedge;": '\U00002227', - "wedgeq;": '\U00002259', - "weierp;": '\U00002118', - "wfr;": '\U0001D534', - "wopf;": '\U0001D568', - "wp;": '\U00002118', - "wr;": '\U00002240', - "wreath;": '\U00002240', - "wscr;": '\U0001D4CC', - "xcap;": '\U000022C2', - "xcirc;": '\U000025EF', - "xcup;": '\U000022C3', - "xdtri;": '\U000025BD', - "xfr;": '\U0001D535', - "xhArr;": '\U000027FA', - "xharr;": '\U000027F7', - "xi;": '\U000003BE', - "xlArr;": '\U000027F8', - "xlarr;": '\U000027F5', - "xmap;": '\U000027FC', - "xnis;": '\U000022FB', - "xodot;": '\U00002A00', - "xopf;": '\U0001D569', - "xoplus;": '\U00002A01', - "xotime;": '\U00002A02', - "xrArr;": '\U000027F9', - "xrarr;": '\U000027F6', - "xscr;": '\U0001D4CD', - "xsqcup;": '\U00002A06', - "xuplus;": '\U00002A04', - "xutri;": '\U000025B3', - "xvee;": '\U000022C1', - "xwedge;": '\U000022C0', - "yacute;": '\U000000FD', - "yacy;": '\U0000044F', - "ycirc;": '\U00000177', - "ycy;": '\U0000044B', - "yen;": '\U000000A5', - "yfr;": '\U0001D536', - "yicy;": '\U00000457', - "yopf;": '\U0001D56A', - "yscr;": '\U0001D4CE', - "yucy;": '\U0000044E', - "yuml;": '\U000000FF', - "zacute;": '\U0000017A', - "zcaron;": '\U0000017E', - "zcy;": '\U00000437', - "zdot;": '\U0000017C', - "zeetrf;": '\U00002128', - "zeta;": '\U000003B6', - "zfr;": '\U0001D537', - "zhcy;": '\U00000436', - "zigrarr;": '\U000021DD', - "zopf;": '\U0001D56B', - "zscr;": '\U0001D4CF', - "zwj;": '\U0000200D', - "zwnj;": '\U0000200C', - "AElig": '\U000000C6', - "AMP": '\U00000026', - "Aacute": '\U000000C1', - "Acirc": '\U000000C2', - "Agrave": '\U000000C0', - "Aring": '\U000000C5', - "Atilde": '\U000000C3', - "Auml": '\U000000C4', - "COPY": '\U000000A9', - "Ccedil": '\U000000C7', - "ETH": '\U000000D0', - "Eacute": '\U000000C9', - "Ecirc": '\U000000CA', - "Egrave": '\U000000C8', - "Euml": '\U000000CB', - "GT": '\U0000003E', - "Iacute": '\U000000CD', - "Icirc": '\U000000CE', - "Igrave": '\U000000CC', - "Iuml": '\U000000CF', - "LT": '\U0000003C', - "Ntilde": 
'\U000000D1', - "Oacute": '\U000000D3', - "Ocirc": '\U000000D4', - "Ograve": '\U000000D2', - "Oslash": '\U000000D8', - "Otilde": '\U000000D5', - "Ouml": '\U000000D6', - "QUOT": '\U00000022', - "REG": '\U000000AE', - "THORN": '\U000000DE', - "Uacute": '\U000000DA', - "Ucirc": '\U000000DB', - "Ugrave": '\U000000D9', - "Uuml": '\U000000DC', - "Yacute": '\U000000DD', - "aacute": '\U000000E1', - "acirc": '\U000000E2', - "acute": '\U000000B4', - "aelig": '\U000000E6', - "agrave": '\U000000E0', - "amp": '\U00000026', - "aring": '\U000000E5', - "atilde": '\U000000E3', - "auml": '\U000000E4', - "brvbar": '\U000000A6', - "ccedil": '\U000000E7', - "cedil": '\U000000B8', - "cent": '\U000000A2', - "copy": '\U000000A9', - "curren": '\U000000A4', - "deg": '\U000000B0', - "divide": '\U000000F7', - "eacute": '\U000000E9', - "ecirc": '\U000000EA', - "egrave": '\U000000E8', - "eth": '\U000000F0', - "euml": '\U000000EB', - "frac12": '\U000000BD', - "frac14": '\U000000BC', - "frac34": '\U000000BE', - "gt": '\U0000003E', - "iacute": '\U000000ED', - "icirc": '\U000000EE', - "iexcl": '\U000000A1', - "igrave": '\U000000EC', - "iquest": '\U000000BF', - "iuml": '\U000000EF', - "laquo": '\U000000AB', - "lt": '\U0000003C', - "macr": '\U000000AF', - "micro": '\U000000B5', - "middot": '\U000000B7', - "nbsp": '\U000000A0', - "not": '\U000000AC', - "ntilde": '\U000000F1', - "oacute": '\U000000F3', - "ocirc": '\U000000F4', - "ograve": '\U000000F2', - "ordf": '\U000000AA', - "ordm": '\U000000BA', - "oslash": '\U000000F8', - "otilde": '\U000000F5', - "ouml": '\U000000F6', - "para": '\U000000B6', - "plusmn": '\U000000B1', - "pound": '\U000000A3', - "quot": '\U00000022', - "raquo": '\U000000BB', - "reg": '\U000000AE', - "sect": '\U000000A7', - "shy": '\U000000AD', - "sup1": '\U000000B9', - "sup2": '\U000000B2', - "sup3": '\U000000B3', - "szlig": '\U000000DF', - "thorn": '\U000000FE', - "times": '\U000000D7', - "uacute": '\U000000FA', - "ucirc": '\U000000FB', - "ugrave": '\U000000F9', - "uml": '\U000000A8', - "uuml": '\U000000FC', - "yacute": '\U000000FD', - "yen": '\U000000A5', - "yuml": '\U000000FF', -} - -// HTML entities that are two unicode codepoints. -var entity2 = map[string][2]rune{ - // TODO(nigeltao): Handle replacements that are wider than their names. 
- // "nLt;": {'\u226A', '\u20D2'}, - // "nGt;": {'\u226B', '\u20D2'}, - "NotEqualTilde;": {'\u2242', '\u0338'}, - "NotGreaterFullEqual;": {'\u2267', '\u0338'}, - "NotGreaterGreater;": {'\u226B', '\u0338'}, - "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, - "NotHumpDownHump;": {'\u224E', '\u0338'}, - "NotHumpEqual;": {'\u224F', '\u0338'}, - "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, - "NotLessLess;": {'\u226A', '\u0338'}, - "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, - "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, - "NotNestedLessLess;": {'\u2AA1', '\u0338'}, - "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, - "NotRightTriangleBar;": {'\u29D0', '\u0338'}, - "NotSquareSubset;": {'\u228F', '\u0338'}, - "NotSquareSuperset;": {'\u2290', '\u0338'}, - "NotSubset;": {'\u2282', '\u20D2'}, - "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, - "NotSucceedsTilde;": {'\u227F', '\u0338'}, - "NotSuperset;": {'\u2283', '\u20D2'}, - "ThickSpace;": {'\u205F', '\u200A'}, - "acE;": {'\u223E', '\u0333'}, - "bne;": {'\u003D', '\u20E5'}, - "bnequiv;": {'\u2261', '\u20E5'}, - "caps;": {'\u2229', '\uFE00'}, - "cups;": {'\u222A', '\uFE00'}, - "fjlig;": {'\u0066', '\u006A'}, - "gesl;": {'\u22DB', '\uFE00'}, - "gvertneqq;": {'\u2269', '\uFE00'}, - "gvnE;": {'\u2269', '\uFE00'}, - "lates;": {'\u2AAD', '\uFE00'}, - "lesg;": {'\u22DA', '\uFE00'}, - "lvertneqq;": {'\u2268', '\uFE00'}, - "lvnE;": {'\u2268', '\uFE00'}, - "nGg;": {'\u22D9', '\u0338'}, - "nGtv;": {'\u226B', '\u0338'}, - "nLl;": {'\u22D8', '\u0338'}, - "nLtv;": {'\u226A', '\u0338'}, - "nang;": {'\u2220', '\u20D2'}, - "napE;": {'\u2A70', '\u0338'}, - "napid;": {'\u224B', '\u0338'}, - "nbump;": {'\u224E', '\u0338'}, - "nbumpe;": {'\u224F', '\u0338'}, - "ncongdot;": {'\u2A6D', '\u0338'}, - "nedot;": {'\u2250', '\u0338'}, - "nesim;": {'\u2242', '\u0338'}, - "ngE;": {'\u2267', '\u0338'}, - "ngeqq;": {'\u2267', '\u0338'}, - "ngeqslant;": {'\u2A7E', '\u0338'}, - "nges;": {'\u2A7E', '\u0338'}, - "nlE;": {'\u2266', '\u0338'}, - "nleqq;": {'\u2266', '\u0338'}, - "nleqslant;": {'\u2A7D', '\u0338'}, - "nles;": {'\u2A7D', '\u0338'}, - "notinE;": {'\u22F9', '\u0338'}, - "notindot;": {'\u22F5', '\u0338'}, - "nparsl;": {'\u2AFD', '\u20E5'}, - "npart;": {'\u2202', '\u0338'}, - "npre;": {'\u2AAF', '\u0338'}, - "npreceq;": {'\u2AAF', '\u0338'}, - "nrarrc;": {'\u2933', '\u0338'}, - "nrarrw;": {'\u219D', '\u0338'}, - "nsce;": {'\u2AB0', '\u0338'}, - "nsubE;": {'\u2AC5', '\u0338'}, - "nsubset;": {'\u2282', '\u20D2'}, - "nsubseteqq;": {'\u2AC5', '\u0338'}, - "nsucceq;": {'\u2AB0', '\u0338'}, - "nsupE;": {'\u2AC6', '\u0338'}, - "nsupset;": {'\u2283', '\u20D2'}, - "nsupseteqq;": {'\u2AC6', '\u0338'}, - "nvap;": {'\u224D', '\u20D2'}, - "nvge;": {'\u2265', '\u20D2'}, - "nvgt;": {'\u003E', '\u20D2'}, - "nvle;": {'\u2264', '\u20D2'}, - "nvlt;": {'\u003C', '\u20D2'}, - "nvltrie;": {'\u22B4', '\u20D2'}, - "nvrtrie;": {'\u22B5', '\u20D2'}, - "nvsim;": {'\u223C', '\u20D2'}, - "race;": {'\u223D', '\u0331'}, - "smtes;": {'\u2AAC', '\uFE00'}, - "sqcaps;": {'\u2293', '\uFE00'}, - "sqcups;": {'\u2294', '\uFE00'}, - "varsubsetneq;": {'\u228A', '\uFE00'}, - "varsubsetneqq;": {'\u2ACB', '\uFE00'}, - "varsupsetneq;": {'\u228B', '\uFE00'}, - "varsupsetneqq;": {'\u2ACC', '\uFE00'}, - "vnsub;": {'\u2282', '\u20D2'}, - "vnsup;": {'\u2283', '\u20D2'}, - "vsubnE;": {'\u2ACB', '\uFE00'}, - "vsubne;": {'\u228A', '\uFE00'}, - "vsupnE;": {'\u2ACC', '\uFE00'}, - "vsupne;": {'\u228B', '\uFE00'}, -} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go deleted file mode 100644 
index d856139620..0000000000
--- a/vendor/golang.org/x/net/html/escape.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"bytes"
-	"strings"
-	"unicode/utf8"
-)
-
-// These replacements permit compatibility with old numeric entities that
-// assumed Windows-1252 encoding.
-// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
-var replacementTable = [...]rune{
-	'\u20AC', // First entry is what 0x80 should be replaced with.
-	'\u0081',
-	'\u201A',
-	'\u0192',
-	'\u201E',
-	'\u2026',
-	'\u2020',
-	'\u2021',
-	'\u02C6',
-	'\u2030',
-	'\u0160',
-	'\u2039',
-	'\u0152',
-	'\u008D',
-	'\u017D',
-	'\u008F',
-	'\u0090',
-	'\u2018',
-	'\u2019',
-	'\u201C',
-	'\u201D',
-	'\u2022',
-	'\u2013',
-	'\u2014',
-	'\u02DC',
-	'\u2122',
-	'\u0161',
-	'\u203A',
-	'\u0153',
-	'\u009D',
-	'\u017E',
-	'\u0178', // Last entry is 0x9F.
-	// 0x00->'\uFFFD' is handled programmatically.
-	// 0x0D->'\u000D' is a no-op.
-}
-
-// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
-// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
-// Precondition: b[src] == '&' && dst <= src.
-// attribute should be true if parsing an attribute value.
-func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
-	// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
-
-	// i starts at 1 because we already know that s[0] == '&'.
-	i, s := 1, b[src:]
-
-	if len(s) <= 1 {
-		b[dst] = b[src]
-		return dst + 1, src + 1
-	}
-
-	if s[i] == '#' {
-		if len(s) <= 3 { // We need to have at least "&#.".
-			b[dst] = b[src]
-			return dst + 1, src + 1
-		}
-		i++
-		c := s[i]
-		hex := false
-		if c == 'x' || c == 'X' {
-			hex = true
-			i++
-		}
-
-		x := '\x00'
-		for i < len(s) {
-			c = s[i]
-			i++
-			if hex {
-				if '0' <= c && c <= '9' {
-					x = 16*x + rune(c) - '0'
-					continue
-				} else if 'a' <= c && c <= 'f' {
-					x = 16*x + rune(c) - 'a' + 10
-					continue
-				} else if 'A' <= c && c <= 'F' {
-					x = 16*x + rune(c) - 'A' + 10
-					continue
-				}
-			} else if '0' <= c && c <= '9' {
-				x = 10*x + rune(c) - '0'
-				continue
-			}
-			if c != ';' {
-				i--
-			}
-			break
-		}
-
-		if i <= 3 { // No characters matched.
-			b[dst] = b[src]
-			return dst + 1, src + 1
-		}
-
-		if 0x80 <= x && x <= 0x9F {
-			// Replace characters from Windows-1252 with UTF-8 equivalents.
-			x = replacementTable[x-0x80]
-		} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
-			// Replace invalid characters with the replacement character.
-			x = '\uFFFD'
-		}
-
-		return dst + utf8.EncodeRune(b[dst:], x), src + i
-	}
-
-	// Consume the maximum number of characters possible, with the
-	// consumed characters matching one of the named references.
-
-	for i < len(s) {
-		c := s[i]
-		i++
-		// Lower-cased characters are more common in entities, so we check for them first.
-		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
-			continue
-		}
-		if c != ';' {
-			i--
-		}
-		break
-	}
-
-	entityName := string(s[1:i])
-	if entityName == "" {
-		// No-op.
-	} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
-		// No-op.
-	} else if x := entity[entityName]; x != 0 {
-		return dst + utf8.EncodeRune(b[dst:], x), src + i
-	} else if x := entity2[entityName]; x[0] != 0 {
-		dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
-		return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
-	} else if !attribute {
-		maxLen := len(entityName) - 1
-		if maxLen > longestEntityWithoutSemicolon {
-			maxLen = longestEntityWithoutSemicolon
-		}
-		for j := maxLen; j > 1; j-- {
-			if x := entity[entityName[:j]]; x != 0 {
-				return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
-			}
-		}
-	}
-
-	dst1, src1 = dst+i, src+i
-	copy(b[dst:dst1], b[src:src1])
-	return dst1, src1
-}
-
-// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
-// attribute should be true if parsing an attribute value.
-func unescape(b []byte, attribute bool) []byte {
-	for i, c := range b {
-		if c == '&' {
-			dst, src := unescapeEntity(b, i, i, attribute)
-			for src < len(b) {
-				c := b[src]
-				if c == '&' {
-					dst, src = unescapeEntity(b, dst, src, attribute)
-				} else {
-					b[dst] = c
-					dst, src = dst+1, src+1
-				}
-			}
-			return b[0:dst]
-		}
-	}
-	return b
-}
-
-// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
-func lower(b []byte) []byte {
-	for i, c := range b {
-		if 'A' <= c && c <= 'Z' {
-			b[i] = c + 'a' - 'A'
-		}
-	}
-	return b
-}
-
-const escapedChars = "&'<>\"\r"
-
-func escape(w writer, s string) error {
-	i := strings.IndexAny(s, escapedChars)
-	for i != -1 {
-		if _, err := w.WriteString(s[:i]); err != nil {
-			return err
-		}
-		var esc string
-		switch s[i] {
-		case '&':
-			esc = "&amp;"
-		case '\'':
-			// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
-			esc = "&#39;"
-		case '<':
-			esc = "&lt;"
-		case '>':
-			esc = "&gt;"
-		case '"':
-			// "&#34;" is shorter than "&quot;".
-			esc = "&#34;"
-		case '\r':
-			esc = "&#13;"
-		default:
-			panic("unrecognized escape character")
-		}
-		s = s[i+1:]
-		if _, err := w.WriteString(esc); err != nil {
-			return err
-		}
-		i = strings.IndexAny(s, escapedChars)
-	}
-	_, err := w.WriteString(s)
-	return err
-}
-
-// EscapeString escapes special characters like "<" to become "&lt;". It
-// escapes only five such characters: <, >, &, ' and ".
-// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
-// always true.
-func EscapeString(s string) string {
-	if strings.IndexAny(s, escapedChars) == -1 {
-		return s
-	}
-	var buf bytes.Buffer
-	escape(&buf, s)
-	return buf.String()
-}
-
-// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
-// larger range of entities than EscapeString escapes. For example, "&aacute;"
-// unescapes to "á", as does "&#225;" and "&#xE1;".
-// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
-// always true.
-func UnescapeString(s string) string {
-	for _, c := range s {
-		if c == '&' {
-			return string(unescape([]byte(s), false))
-		}
-	}
-	return s
-}
diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go
deleted file mode 100644
index 01477a9639..0000000000
--- a/vendor/golang.org/x/net/html/foreign.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"strings"
-)
-
-func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
-	for i := range aa {
-		if newName, ok := nameMap[aa[i].Key]; ok {
-			aa[i].Key = newName
-		}
-	}
-}
-
-func adjustForeignAttributes(aa []Attribute) {
-	for i, a := range aa {
-		if a.Key == "" || a.Key[0] != 'x' {
-			continue
-		}
-		switch a.Key {
-		case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
-			"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
-			j := strings.Index(a.Key, ":")
-			aa[i].Namespace = a.Key[:j]
-			aa[i].Key = a.Key[j+1:]
-		}
-	}
-}
-
-func htmlIntegrationPoint(n *Node) bool {
-	if n.Type != ElementNode {
-		return false
-	}
-	switch n.Namespace {
-	case "math":
-		if n.Data == "annotation-xml" {
-			for _, a := range n.Attr {
-				if a.Key == "encoding" {
-					val := strings.ToLower(a.Val)
-					if val == "text/html" || val == "application/xhtml+xml" {
-						return true
-					}
-				}
-			}
-		}
-	case "svg":
-		switch n.Data {
-		case "desc", "foreignObject", "title":
-			return true
-		}
-	}
-	return false
-}
-
-func mathMLTextIntegrationPoint(n *Node) bool {
-	if n.Namespace != "math" {
-		return false
-	}
-	switch n.Data {
-	case "mi", "mo", "mn", "ms", "mtext":
-		return true
-	}
-	return false
-}
-
-// Section 12.2.6.5.
-var breakout = map[string]bool{
-	"b": true,
-	"big": true,
-	"blockquote": true,
-	"body": true,
-	"br": true,
-	"center": true,
-	"code": true,
-	"dd": true,
-	"div": true,
-	"dl": true,
-	"dt": true,
-	"em": true,
-	"embed": true,
-	"h1": true,
-	"h2": true,
-	"h3": true,
-	"h4": true,
-	"h5": true,
-	"h6": true,
-	"head": true,
-	"hr": true,
-	"i": true,
-	"img": true,
-	"li": true,
-	"listing": true,
-	"menu": true,
-	"meta": true,
-	"nobr": true,
-	"ol": true,
-	"p": true,
-	"pre": true,
-	"ruby": true,
-	"s": true,
-	"small": true,
-	"span": true,
-	"strong": true,
-	"strike": true,
-	"sub": true,
-	"sup": true,
-	"table": true,
-	"tt": true,
-	"u": true,
-	"ul": true,
-	"var": true,
-}
-
-// Section 12.2.6.5.
-var svgTagNameAdjustments = map[string]string{
-	"altglyph": "altGlyph",
-	"altglyphdef": "altGlyphDef",
-	"altglyphitem": "altGlyphItem",
-	"animatecolor": "animateColor",
-	"animatemotion": "animateMotion",
-	"animatetransform": "animateTransform",
-	"clippath": "clipPath",
-	"feblend": "feBlend",
-	"fecolormatrix": "feColorMatrix",
-	"fecomponenttransfer": "feComponentTransfer",
-	"fecomposite": "feComposite",
-	"feconvolvematrix": "feConvolveMatrix",
-	"fediffuselighting": "feDiffuseLighting",
-	"fedisplacementmap": "feDisplacementMap",
-	"fedistantlight": "feDistantLight",
-	"feflood": "feFlood",
-	"fefunca": "feFuncA",
-	"fefuncb": "feFuncB",
-	"fefuncg": "feFuncG",
-	"fefuncr": "feFuncR",
-	"fegaussianblur": "feGaussianBlur",
-	"feimage": "feImage",
-	"femerge": "feMerge",
-	"femergenode": "feMergeNode",
-	"femorphology": "feMorphology",
-	"feoffset": "feOffset",
-	"fepointlight": "fePointLight",
-	"fespecularlighting": "feSpecularLighting",
-	"fespotlight": "feSpotLight",
-	"fetile": "feTile",
-	"feturbulence": "feTurbulence",
-	"foreignobject": "foreignObject",
-	"glyphref": "glyphRef",
-	"lineargradient": "linearGradient",
-	"radialgradient": "radialGradient",
-	"textpath": "textPath",
-}
-
-// Section 12.2.6.1
-var mathMLAttributeAdjustments = map[string]string{
-	"definitionurl": "definitionURL",
-}
-
-var svgAttributeAdjustments = map[string]string{
-	"attributename": "attributeName",
-	"attributetype": "attributeType",
-	"basefrequency": "baseFrequency",
-	"baseprofile": "baseProfile",
-	"calcmode": "calcMode",
-	"clippathunits": "clipPathUnits",
-	"contentscripttype": "contentScriptType",
-	"contentstyletype": "contentStyleType",
-	"diffuseconstant": "diffuseConstant",
-	"edgemode": "edgeMode",
-	"externalresourcesrequired": "externalResourcesRequired",
-	"filterres": "filterRes",
-	"filterunits": "filterUnits",
-	"glyphref": "glyphRef",
-	"gradienttransform": "gradientTransform",
-	"gradientunits": "gradientUnits",
-	"kernelmatrix": "kernelMatrix",
-	"kernelunitlength": "kernelUnitLength",
-	"keypoints": "keyPoints",
-	"keysplines": "keySplines",
-	"keytimes": "keyTimes",
-	"lengthadjust": "lengthAdjust",
-	"limitingconeangle": "limitingConeAngle",
-	"markerheight": "markerHeight",
-	"markerunits": "markerUnits",
-	"markerwidth": "markerWidth",
-	"maskcontentunits": "maskContentUnits",
-	"maskunits": "maskUnits",
-	"numoctaves": "numOctaves",
-	"pathlength": "pathLength",
-	"patterncontentunits": "patternContentUnits",
-	"patterntransform": "patternTransform",
-	"patternunits": "patternUnits",
-	"pointsatx": "pointsAtX",
-	"pointsaty": "pointsAtY",
-	"pointsatz": "pointsAtZ",
-	"preservealpha": "preserveAlpha",
-	"preserveaspectratio": "preserveAspectRatio",
-	"primitiveunits": "primitiveUnits",
-	"refx": "refX",
-	"refy": "refY",
-	"repeatcount": "repeatCount",
-	"repeatdur": "repeatDur",
-	"requiredextensions": "requiredExtensions",
-	"requiredfeatures": "requiredFeatures",
-	"specularconstant": "specularConstant",
-	"specularexponent": "specularExponent",
-	"spreadmethod": "spreadMethod",
-	"startoffset": "startOffset",
-	"stddeviation": "stdDeviation",
-	"stitchtiles": "stitchTiles",
-	"surfacescale": "surfaceScale",
-	"systemlanguage": "systemLanguage",
-	"tablevalues": "tableValues",
-	"targetx": "targetX",
-	"targety": "targetY",
-	"textlength": "textLength",
-	"viewbox": "viewBox",
-	"viewtarget": "viewTarget",
-	"xchannelselector": "xChannelSelector",
-	"ychannelselector": "yChannelSelector",
-	"zoomandpan": "zoomAndPan",
-}
diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go
deleted file mode 100644
index 2c1cade607..0000000000
--- a/vendor/golang.org/x/net/html/node.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"golang.org/x/net/html/atom"
-)
-
-// A NodeType is the type of a Node.
-type NodeType uint32
-
-const (
-	ErrorNode NodeType = iota
-	TextNode
-	DocumentNode
-	ElementNode
-	CommentNode
-	DoctypeNode
-	scopeMarkerNode
-)
-
-// Section 12.2.4.3 says "The markers are inserted when entering applet,
-// object, marquee, template, td, th, and caption elements, and are used
-// to prevent formatting from "leaking" into applet, object, marquee,
-// template, td, th, and caption elements".
-var scopeMarker = Node{Type: scopeMarkerNode}
-
-// A Node consists of a NodeType and some Data (tag name for element nodes,
-// content for text) and are part of a tree of Nodes. Element nodes may also
-// have a Namespace and contain a slice of Attributes. Data is unescaped, so
-// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
-// is the atom for Data, or zero if Data is not a known tag name.
-//
-// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
-// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
-// "svg" is short for "http://www.w3.org/2000/svg".
-type Node struct {
-	Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
-
-	Type      NodeType
-	DataAtom  atom.Atom
-	Data      string
-	Namespace string
-	Attr      []Attribute
-}
-
-// InsertBefore inserts newChild as a child of n, immediately before oldChild
-// in the sequence of n's children. oldChild may be nil, in which case newChild
-// is appended to the end of n's children.
-//
-// It will panic if newChild already has a parent or siblings.
-func (n *Node) InsertBefore(newChild, oldChild *Node) {
-	if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
-		panic("html: InsertBefore called for an attached child Node")
-	}
-	var prev, next *Node
-	if oldChild != nil {
-		prev, next = oldChild.PrevSibling, oldChild
-	} else {
-		prev = n.LastChild
-	}
-	if prev != nil {
-		prev.NextSibling = newChild
-	} else {
-		n.FirstChild = newChild
-	}
-	if next != nil {
-		next.PrevSibling = newChild
-	} else {
-		n.LastChild = newChild
-	}
-	newChild.Parent = n
-	newChild.PrevSibling = prev
-	newChild.NextSibling = next
-}
-
-// AppendChild adds a node c as a child of n.
-//
-// It will panic if c already has a parent or siblings.
-func (n *Node) AppendChild(c *Node) {
-	if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
-		panic("html: AppendChild called for an attached child Node")
-	}
-	last := n.LastChild
-	if last != nil {
-		last.NextSibling = c
-	} else {
-		n.FirstChild = c
-	}
-	n.LastChild = c
-	c.Parent = n
-	c.PrevSibling = last
-}
-
-// RemoveChild removes a node c that is a child of n. Afterwards, c will have
-// no parent and no siblings.
-//
-// It will panic if c's parent is not n.
-func (n *Node) RemoveChild(c *Node) {
-	if c.Parent != n {
-		panic("html: RemoveChild called for a non-child Node")
-	}
-	if n.FirstChild == c {
-		n.FirstChild = c.NextSibling
-	}
-	if c.NextSibling != nil {
-		c.NextSibling.PrevSibling = c.PrevSibling
-	}
-	if n.LastChild == c {
-		n.LastChild = c.PrevSibling
-	}
-	if c.PrevSibling != nil {
-		c.PrevSibling.NextSibling = c.NextSibling
-	}
-	c.Parent = nil
-	c.PrevSibling = nil
-	c.NextSibling = nil
-}
-
-// reparentChildren reparents all of src's child nodes to dst.
-func reparentChildren(dst, src *Node) {
-	for {
-		child := src.FirstChild
-		if child == nil {
-			break
-		}
-		src.RemoveChild(child)
-		dst.AppendChild(child)
-	}
-}
-
-// clone returns a new node with the same type, data and attributes.
-// The clone has no parent, no siblings and no children.
-func (n *Node) clone() *Node {
-	m := &Node{
-		Type:     n.Type,
-		DataAtom: n.DataAtom,
-		Data:     n.Data,
-		Attr:     make([]Attribute, len(n.Attr)),
-	}
-	copy(m.Attr, n.Attr)
-	return m
-}
-
-// nodeStack is a stack of nodes.
-type nodeStack []*Node
-
-// pop pops the stack. It will panic if s is empty.
-func (s *nodeStack) pop() *Node {
-	i := len(*s)
-	n := (*s)[i-1]
-	*s = (*s)[:i-1]
-	return n
-}
-
-// top returns the most recently pushed node, or nil if s is empty.
-func (s *nodeStack) top() *Node {
-	if i := len(*s); i > 0 {
-		return (*s)[i-1]
-	}
-	return nil
-}
-
-// index returns the index of the top-most occurrence of n in the stack, or -1
-// if n is not present.
-func (s *nodeStack) index(n *Node) int {
-	for i := len(*s) - 1; i >= 0; i-- {
-		if (*s)[i] == n {
-			return i
-		}
-	}
-	return -1
-}
-
-// contains returns whether a is within s.
-func (s *nodeStack) contains(a atom.Atom) bool {
-	for _, n := range *s {
-		if n.DataAtom == a {
-			return true
-		}
-	}
-	return false
-}
-
-// insert inserts a node at the given index.
-func (s *nodeStack) insert(i int, n *Node) {
-	(*s) = append(*s, nil)
-	copy((*s)[i+1:], (*s)[i:])
-	(*s)[i] = n
-}
-
-// remove removes a node from the stack. It is a no-op if n is not present.
-func (s *nodeStack) remove(n *Node) {
-	i := s.index(n)
-	if i == -1 {
-		return
-	}
-	copy((*s)[i:], (*s)[i+1:])
-	j := len(*s) - 1
-	(*s)[j] = nil
-	*s = (*s)[:j]
-}
-
-type insertionModeStack []insertionMode
-
-func (s *insertionModeStack) pop() (im insertionMode) {
-	i := len(*s)
-	im = (*s)[i-1]
-	*s = (*s)[:i-1]
-	return im
-}
-
-func (s *insertionModeStack) top() insertionMode {
-	if i := len(*s); i > 0 {
-		return (*s)[i-1]
-	}
-	return nil
-}
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
deleted file mode 100644
index 64a5793725..0000000000
--- a/vendor/golang.org/x/net/html/parse.go
+++ /dev/null
@@ -1,2311 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"strings"
-
-	a "golang.org/x/net/html/atom"
-)
-
-// A parser implements the HTML5 parsing algorithm:
-// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction
-type parser struct {
-	// tokenizer provides the tokens for the parser.
-	tokenizer *Tokenizer
-	// tok is the most recently read token.
-	tok Token
-	// Self-closing tags like <br/>
are treated as start tags, except that - // hasSelfClosingToken is set while they are being processed. - hasSelfClosingToken bool - // doc is the document root element. - doc *Node - // The stack of open elements (section 12.2.4.2) and active formatting - // elements (section 12.2.4.3). - oe, afe nodeStack - // Element pointers (section 12.2.4.4). - head, form *Node - // Other parsing state flags (section 12.2.4.5). - scripting, framesetOK bool - // The stack of template insertion modes - templateStack insertionModeStack - // im is the current insertion mode. - im insertionMode - // originalIM is the insertion mode to go back to after completing a text - // or inTableText insertion mode. - originalIM insertionMode - // fosterParenting is whether new elements should be inserted according to - // the foster parenting rules (section 12.2.6.1). - fosterParenting bool - // quirks is whether the parser is operating in "quirks mode." - quirks bool - // fragment is whether the parser is parsing an HTML fragment. - fragment bool - // context is the context element when parsing an HTML fragment - // (section 12.4). - context *Node -} - -func (p *parser) top() *Node { - if n := p.oe.top(); n != nil { - return n - } - return p.doc -} - -// Stop tags for use in popUntil. These come from section 12.2.4.2. -var ( - defaultScopeStopTags = map[string][]a.Atom{ - "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, - "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, - "svg": {a.Desc, a.ForeignObject, a.Title}, - } -) - -type scope int - -const ( - defaultScope scope = iota - listItemScope - buttonScope - tableScope - tableRowScope - tableBodyScope - selectScope -) - -// popUntil pops the stack of open elements at the highest element whose tag -// is in matchTags, provided there is no higher element in the scope's stop -// tags (as defined in section 12.2.4.2). It returns whether or not there was -// such an element. If there was not, popUntil leaves the stack unchanged. -// -// For example, the set of stop tags for table scope is: "html", "table". If -// the stack was: -// ["html", "body", "font", "table", "b", "i", "u"] -// then popUntil(tableScope, "font") would return false, but -// popUntil(tableScope, "i") would return true and the stack would become: -// ["html", "body", "font", "table", "b"] -// -// If an element's tag is in both the stop tags and matchTags, then the stack -// will be popped and the function returns true (provided, of course, there was -// no higher element in the stack that was also in the stop tags). For example, -// popUntil(tableScope, "table") returns true and leaves: -// ["html", "body", "font"] -func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { - if i := p.indexOfElementInScope(s, matchTags...); i != -1 { - p.oe = p.oe[:i] - return true - } - return false -} - -// indexOfElementInScope returns the index in p.oe of the highest element whose -// tag is in matchTags that is in scope. If no matching element is in scope, it -// returns -1. -func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - if p.oe[i].Namespace == "" { - for _, t := range matchTags { - if t == tagAtom { - return i - } - } - switch s { - case defaultScope: - // No-op. 
- case listItemScope: - if tagAtom == a.Ol || tagAtom == a.Ul { - return -1 - } - case buttonScope: - if tagAtom == a.Button { - return -1 - } - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { - return -1 - } - case selectScope: - if tagAtom != a.Optgroup && tagAtom != a.Option { - return -1 - } - default: - panic("unreachable") - } - } - switch s { - case defaultScope, listItemScope, buttonScope: - for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { - if t == tagAtom { - return -1 - } - } - } - } - return -1 -} - -// elementInScope is like popUntil, except that it doesn't modify the stack of -// open elements. -func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { - return p.indexOfElementInScope(s, matchTags...) != -1 -} - -// clearStackToContext pops elements off the stack of open elements until a -// scope-defined element is found. -func (p *parser) clearStackToContext(s scope) { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - switch s { - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { - p.oe = p.oe[:i+1] - return - } - case tableRowScope: - if tagAtom == a.Html || tagAtom == a.Tr || tagAtom == a.Template { - p.oe = p.oe[:i+1] - return - } - case tableBodyScope: - if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead || tagAtom == a.Template { - p.oe = p.oe[:i+1] - return - } - default: - panic("unreachable") - } - } -} - -// generateImpliedEndTags pops nodes off the stack of open elements as long as -// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc. -// If exceptions are specified, nodes with that name will not be popped off. -func (p *parser) generateImpliedEndTags(exceptions ...string) { - var i int -loop: - for i = len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - if n.Type == ElementNode { - switch n.DataAtom { - case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: - for _, except := range exceptions { - if n.Data == except { - break loop - } - } - continue - } - } - break - } - - p.oe = p.oe[:i+1] -} - -// addChild adds a child node n to the top element, and pushes n onto the stack -// of open elements if it is an element node. -func (p *parser) addChild(n *Node) { - if p.shouldFosterParent() { - p.fosterParent(n) - } else { - p.top().AppendChild(n) - } - - if n.Type == ElementNode { - p.oe = append(p.oe, n) - } -} - -// shouldFosterParent returns whether the next node to be added should be -// foster parented. -func (p *parser) shouldFosterParent() bool { - if p.fosterParenting { - switch p.top().DataAtom { - case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: - return true - } - } - return false -} - -// fosterParent adds a child node according to the foster parenting rules. -// Section 12.2.6.1, "foster parenting". -func (p *parser) fosterParent(n *Node) { - var table, parent, prev, template *Node - var i int - for i = len(p.oe) - 1; i >= 0; i-- { - if p.oe[i].DataAtom == a.Table { - table = p.oe[i] - break - } - } - - var j int - for j = len(p.oe) - 1; j >= 0; j-- { - if p.oe[j].DataAtom == a.Template { - template = p.oe[j] - break - } - } - - if template != nil && (table == nil || j > i) { - template.AppendChild(n) - return - } - - if table == nil { - // The foster parent is the html element. 
- parent = p.oe[0] - } else { - parent = table.Parent - } - if parent == nil { - parent = p.oe[i-1] - } - - if table != nil { - prev = table.PrevSibling - } else { - prev = parent.LastChild - } - if prev != nil && prev.Type == TextNode && n.Type == TextNode { - prev.Data += n.Data - return - } - - parent.InsertBefore(n, table) -} - -// addText adds text to the preceding node if it is a text node, or else it -// calls addChild with a new text node. -func (p *parser) addText(text string) { - if text == "" { - return - } - - if p.shouldFosterParent() { - p.fosterParent(&Node{ - Type: TextNode, - Data: text, - }) - return - } - - t := p.top() - if n := t.LastChild; n != nil && n.Type == TextNode { - n.Data += text - return - } - p.addChild(&Node{ - Type: TextNode, - Data: text, - }) -} - -// addElement adds a child element based on the current token. -func (p *parser) addElement() { - p.addChild(&Node{ - Type: ElementNode, - DataAtom: p.tok.DataAtom, - Data: p.tok.Data, - Attr: p.tok.Attr, - }) -} - -// Section 12.2.4.3. -func (p *parser) addFormattingElement() { - tagAtom, attr := p.tok.DataAtom, p.tok.Attr - p.addElement() - - // Implement the Noah's Ark clause, but with three per family instead of two. - identicalElements := 0 -findIdenticalElements: - for i := len(p.afe) - 1; i >= 0; i-- { - n := p.afe[i] - if n.Type == scopeMarkerNode { - break - } - if n.Type != ElementNode { - continue - } - if n.Namespace != "" { - continue - } - if n.DataAtom != tagAtom { - continue - } - if len(n.Attr) != len(attr) { - continue - } - compareAttributes: - for _, t0 := range n.Attr { - for _, t1 := range attr { - if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { - // Found a match for this attribute, continue with the next attribute. - continue compareAttributes - } - } - // If we get here, there is no attribute that matches a. - // Therefore the element is not identical to the new one. - continue findIdenticalElements - } - - identicalElements++ - if identicalElements >= 3 { - p.afe.remove(n) - } - } - - p.afe = append(p.afe, p.top()) -} - -// Section 12.2.4.3. -func (p *parser) clearActiveFormattingElements() { - for { - n := p.afe.pop() - if len(p.afe) == 0 || n.Type == scopeMarkerNode { - return - } - } -} - -// Section 12.2.4.3. -func (p *parser) reconstructActiveFormattingElements() { - n := p.afe.top() - if n == nil { - return - } - if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { - return - } - i := len(p.afe) - 1 - for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { - if i == 0 { - i = -1 - break - } - i-- - n = p.afe[i] - } - for { - i++ - clone := p.afe[i].clone() - p.addChild(clone) - p.afe[i] = clone - if i == len(p.afe)-1 { - break - } - } -} - -// Section 12.2.5. -func (p *parser) acknowledgeSelfClosingTag() { - p.hasSelfClosingToken = false -} - -// An insertion mode (section 12.2.4.1) is the state transition function from -// a particular state in the HTML5 parser's state machine. It updates the -// parser's fields depending on parser.tok (where ErrorToken means EOF). -// It returns whether the token was consumed. -type insertionMode func(*parser) bool - -// setOriginalIM sets the insertion mode to return to after completing a text or -// inTableText insertion mode. -// Section 12.2.4.1, "using the rules for". -func (p *parser) setOriginalIM() { - if p.originalIM != nil { - panic("html: bad parser state: originalIM was set twice") - } - p.originalIM = p.im -} - -// Section 12.2.4.1, "reset the insertion mode". 
-func (p *parser) resetInsertionMode() { - for i := len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - last := i == 0 - if last && p.context != nil { - n = p.context - } - - switch n.DataAtom { - case a.Select: - if !last { - for ancestor, first := n, p.oe[0]; ancestor != first; { - if ancestor == first { - break - } - ancestor = p.oe[p.oe.index(ancestor)-1] - switch ancestor.DataAtom { - case a.Template: - p.im = inSelectIM - return - case a.Table: - p.im = inSelectInTableIM - return - } - } - } - p.im = inSelectIM - case a.Td, a.Th: - // TODO: remove this divergence from the HTML5 spec. - // - // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 - p.im = inCellIM - case a.Tr: - p.im = inRowIM - case a.Tbody, a.Thead, a.Tfoot: - p.im = inTableBodyIM - case a.Caption: - p.im = inCaptionIM - case a.Colgroup: - p.im = inColumnGroupIM - case a.Table: - p.im = inTableIM - case a.Template: - // TODO: remove this divergence from the HTML5 spec. - if n.Namespace != "" { - continue - } - p.im = p.templateStack.top() - case a.Head: - // TODO: remove this divergence from the HTML5 spec. - // - // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 - p.im = inHeadIM - case a.Body: - p.im = inBodyIM - case a.Frameset: - p.im = inFramesetIM - case a.Html: - if p.head == nil { - p.im = beforeHeadIM - } else { - p.im = afterHeadIM - } - default: - if last { - p.im = inBodyIM - return - } - continue - } - return - } -} - -const whitespace = " \t\r\n\f" - -// Section 12.2.6.4.1. -func initialIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case CommentToken: - p.doc.AppendChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - n, quirks := parseDoctype(p.tok.Data) - p.doc.AppendChild(n) - p.quirks = quirks - p.im = beforeHTMLIM - return true - } - p.quirks = true - p.im = beforeHTMLIM - return false -} - -// Section 12.2.6.4.2. -func beforeHTMLIM(p *parser) bool { - switch p.tok.Type { - case DoctypeToken: - // Ignore the token. - return true - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case StartTagToken: - if p.tok.DataAtom == a.Html { - p.addElement() - p.im = beforeHeadIM - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head, a.Body, a.Html, a.Br: - p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.doc.AppendChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - } - p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) - return false -} - -// Section 12.2.6.4.3. -func beforeHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Head: - p.addElement() - p.head = p.top() - p.im = inHeadIM - return true - case a.Html: - return inBodyIM(p) - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head, a.Body, a.Html, a.Br: - p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) - return false - default: - // Ignore the token. 
- return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) - return false -} - -// Section 12.2.6.4.4. -func inHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - s := strings.TrimLeft(p.tok.Data, whitespace) - if len(s) < len(p.tok.Data) { - // Add the initial whitespace to the current node. - p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) - if s == "" { - return true - } - p.tok.Data = s - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Html: - return inBodyIM(p) - case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: - p.addElement() - p.oe.pop() - p.acknowledgeSelfClosingTag() - return true - case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: - p.addElement() - p.setOriginalIM() - p.im = textIM - return true - case a.Head: - // Ignore the token. - return true - case a.Template: - p.addElement() - p.afe = append(p.afe, &scopeMarker) - p.framesetOK = false - p.im = inTemplateIM - p.templateStack = append(p.templateStack, inTemplateIM) - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head: - p.oe.pop() - p.im = afterHeadIM - return true - case a.Body, a.Html, a.Br: - p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) - return false - case a.Template: - if !p.oe.contains(a.Template) { - return true - } - // TODO: remove this divergence from the HTML5 spec. - // - // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 - p.generateImpliedEndTags() - for i := len(p.oe) - 1; i >= 0; i-- { - if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template { - p.oe = p.oe[:i] - break - } - } - p.clearActiveFormattingElements() - p.templateStack.pop() - p.resetInsertionMode() - return true - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) - return false -} - -// Section 12.2.6.4.6. -func afterHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - s := strings.TrimLeft(p.tok.Data, whitespace) - if len(s) < len(p.tok.Data) { - // Add the initial whitespace to the current node. - p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) - if s == "" { - return true - } - p.tok.Data = s - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Html: - return inBodyIM(p) - case a.Body: - p.addElement() - p.framesetOK = false - p.im = inBodyIM - return true - case a.Frameset: - p.addElement() - p.im = inFramesetIM - return true - case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: - p.oe = append(p.oe, p.head) - defer p.oe.remove(p.head) - return inHeadIM(p) - case a.Head: - // Ignore the token. - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Body, a.Html, a.Br: - // Drop down to creating an implied tag. - case a.Template: - return inHeadIM(p) - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) - p.framesetOK = true - return false -} - -// copyAttributes copies attributes of src not found on dst to dst. 
-func copyAttributes(dst *Node, src Token) { - if len(src.Attr) == 0 { - return - } - attr := map[string]string{} - for _, t := range dst.Attr { - attr[t.Key] = t.Val - } - for _, t := range src.Attr { - if _, ok := attr[t.Key]; !ok { - dst.Attr = append(dst.Attr, t) - attr[t.Key] = t.Val - } - } -} - -// Section 12.2.6.4.7. -func inBodyIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - d := p.tok.Data - switch n := p.oe.top(); n.DataAtom { - case a.Pre, a.Listing: - if n.FirstChild == nil { - // Ignore a newline at the start of a
<pre> block.
-				if d != "" && d[0] == '\r' {
-					d = d[1:]
-				}
-				if d != "" && d[0] == '\n' {
-					d = d[1:]
-				}
-			}
-		}
-		d = strings.Replace(d, "\x00", "", -1)
-		if d == "" {
-			return true
-		}
-		p.reconstructActiveFormattingElements()
-		p.addText(d)
-		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
-			// There were non-whitespace characters inserted.
-			p.framesetOK = false
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			if p.oe.contains(a.Template) {
-				return true
-			}
-			copyAttributes(p.oe[0], p.tok)
-		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
-			return inHeadIM(p)
-		case a.Body:
-			if p.oe.contains(a.Template) {
-				return true
-			}
-			if len(p.oe) >= 2 {
-				body := p.oe[1]
-				if body.Type == ElementNode && body.DataAtom == a.Body {
-					p.framesetOK = false
-					copyAttributes(body, p.tok)
-				}
-			}
-		case a.Frameset:
-			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
-				// Ignore the token.
-				return true
-			}
-			body := p.oe[1]
-			if body.Parent != nil {
-				body.Parent.RemoveChild(body)
-			}
-			p.oe = p.oe[:1]
-			p.addElement()
-			p.im = inFramesetIM
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(buttonScope, a.P)
-			switch n := p.top(); n.DataAtom {
-			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-				p.oe.pop()
-			}
-			p.addElement()
-		case a.Pre, a.Listing:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			// The newline, if any, will be dealt with by the TextToken case.
-			p.framesetOK = false
-		case a.Form:
-			if p.form != nil && !p.oe.contains(a.Template) {
-				// Ignore the token
-				return true
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			if !p.oe.contains(a.Template) {
-				p.form = p.top()
-			}
-		case a.Li:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Li:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Dd, a.Dt:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Dd, a.Dt:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Plaintext:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Button:
-			p.popUntil(defaultScope, a.Button)
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-		case a.A:
-			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
-				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
-					p.inBodyEndTagFormatting(a.A)
-					p.oe.remove(n)
-					p.afe.remove(n)
-					break
-				}
-			}
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.Nobr:
-			p.reconstructActiveFormattingElements()
-			if p.elementInScope(defaultScope, a.Nobr) {
-				p.inBodyEndTagFormatting(a.Nobr)
-				p.reconstructActiveFormattingElements()
-			}
-			p.addFormattingElement()
-		case a.Applet, a.Marquee, a.Object:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.afe = append(p.afe, &scopeMarker)
-			p.framesetOK = false
-		case a.Table:
-			if !p.quirks {
-				p.popUntil(buttonScope, a.P)
-			}
-			p.addElement()
-			p.framesetOK = false
-			p.im = inTableIM
-			return true
-		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			if p.tok.DataAtom == a.Input {
-				for _, t := range p.tok.Attr {
-					if t.Key == "type" {
-						if strings.ToLower(t.Val) == "hidden" {
-							// Skip setting framesetOK = false
-							return true
-						}
-					}
-				}
-			}
-			p.framesetOK = false
-		case a.Param, a.Source, a.Track:
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-		case a.Hr:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			p.framesetOK = false
-		case a.Image:
-			p.tok.DataAtom = a.Img
-			p.tok.Data = a.Img.String()
-			return false
-		case a.Isindex:
-			if p.form != nil {
-				// Ignore the token.
-				return true
-			}
-			action := ""
-			prompt := "This is a searchable index. Enter search keywords: "
-			attr := []Attribute{{Key: "name", Val: "isindex"}}
-			for _, t := range p.tok.Attr {
-				switch t.Key {
-				case "action":
-					action = t.Val
-				case "name":
-					// Ignore the attribute.
-				case "prompt":
-					prompt = t.Val
-				default:
-					attr = append(attr, t)
-				}
-			}
-			p.acknowledgeSelfClosingTag()
-			p.popUntil(buttonScope, a.P)
-			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
-			if p.form == nil {
-				// NOTE: The 'isindex' element has been removed,
-				// and the 'template' element has not been designed to be
-				// collaborative with the index element.
-				//
-				// Ignore the token.
-				return true
-			}
-			if action != "" {
-				p.form.Attr = []Attribute{{Key: "action", Val: action}}
-			}
-			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
-			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
-			p.addText(prompt)
-			p.addChild(&Node{
-				Type:     ElementNode,
-				DataAtom: a.Input,
-				Data:     a.Input.String(),
-				Attr:     attr,
-			})
-			p.oe.pop()
-			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
-			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
-			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
-		case a.Textarea:
-			p.addElement()
-			p.setOriginalIM()
-			p.framesetOK = false
-			p.im = textIM
-		case a.Xmp:
-			p.popUntil(buttonScope, a.P)
-			p.reconstructActiveFormattingElements()
-			p.framesetOK = false
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Iframe:
-			p.framesetOK = false
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Noembed, a.Noscript:
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Select:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-			p.im = inSelectIM
-			return true
-		case a.Optgroup, a.Option:
-			if p.top().DataAtom == a.Option {
-				p.oe.pop()
-			}
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		case a.Rb, a.Rtc:
-			if p.elementInScope(defaultScope, a.Ruby) {
-				p.generateImpliedEndTags()
-			}
-			p.addElement()
-		case a.Rp, a.Rt:
-			if p.elementInScope(defaultScope, a.Ruby) {
-				p.generateImpliedEndTags("rtc")
-			}
-			p.addElement()
-		case a.Math, a.Svg:
-			p.reconstructActiveFormattingElements()
-			if p.tok.DataAtom == a.Math {
-				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
-			} else {
-				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
-			}
-			adjustForeignAttributes(p.tok.Attr)
-			p.addElement()
-			p.top().Namespace = p.tok.Data
-			if p.hasSelfClosingToken {
-				p.oe.pop()
-				p.acknowledgeSelfClosingTag()
-			}
-			return true
-		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
-			// Ignore the token.
-		default:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Body:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.im = afterBodyIM
-			}
-		case a.Html:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
-				return false
-			}
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.Form:
-			if p.oe.contains(a.Template) {
-				i := p.indexOfElementInScope(defaultScope, a.Form)
-				if i == -1 {
-					// Ignore the token.
-					return true
-				}
-				p.generateImpliedEndTags()
-				if p.oe[i].DataAtom != a.Form {
-					// Ignore the token.
-					return true
-				}
-				p.popUntil(defaultScope, a.Form)
-			} else {
-				node := p.form
-				p.form = nil
-				i := p.indexOfElementInScope(defaultScope, a.Form)
-				if node == nil || i == -1 || p.oe[i] != node {
-					// Ignore the token.
-					return true
-				}
-				p.generateImpliedEndTags()
-				p.oe.remove(node)
-			}
-		case a.P:
-			if !p.elementInScope(buttonScope, a.P) {
-				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
-			}
-			p.popUntil(buttonScope, a.P)
-		case a.Li:
-			p.popUntil(listItemScope, a.Li)
-		case a.Dd, a.Dt:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
-		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.inBodyEndTagFormatting(p.tok.DataAtom)
-		case a.Applet, a.Marquee, a.Object:
-			if p.popUntil(defaultScope, p.tok.DataAtom) {
-				p.clearActiveFormattingElements()
-			}
-		case a.Br:
-			p.tok.Type = StartTagToken
-			return false
-		case a.Template:
-			return inHeadIM(p)
-		default:
-			p.inBodyEndTagOther(p.tok.DataAtom)
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	case ErrorToken:
-		// TODO: remove this divergence from the HTML5 spec.
-		if len(p.templateStack) > 0 {
-			p.im = inTemplateIM
-			return false
-		} else {
-			for _, e := range p.oe {
-				switch e.DataAtom {
-				case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th,
-					a.Thead, a.Tr, a.Body, a.Html:
-				default:
-					return true
-				}
-			}
-		}
-	}
-
-	return true
-}
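
The insertion-mode functions deleted above are reachable only through the package's public Parse entry point. As a minimal sketch of their observable effect (assuming nothing beyond the documented golang.org/x/net/html API, with the rendered output shown only approximately): the implied-token handling in beforeHTMLIM, beforeHeadIM and afterHeadIM synthesizes the <html>, <head> and <body> elements even for bare text input.

package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// Bare text input: the parser's implied tokens still produce a full tree.
	doc, err := html.Parse(strings.NewReader("hi"))
	if err != nil {
		panic(err)
	}
	// Expected rendering, approximately:
	// <html><head></head><body>hi</body></html>
	html.Render(os.Stdout, doc)
}
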
-
-func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
-	// This is the "adoption agency" algorithm, described at
-	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
-
-	// TODO: this is a fairly literal line-by-line translation of that algorithm.
-	// Once the code successfully parses the comprehensive test suite, we should
-	// refactor this code to be more idiomatic.
-
-	// Steps 1-4. The outer loop.
-	for i := 0; i < 8; i++ {
-		// Step 5. Find the formatting element.
-		var formattingElement *Node
-		for j := len(p.afe) - 1; j >= 0; j-- {
-			if p.afe[j].Type == scopeMarkerNode {
-				break
-			}
-			if p.afe[j].DataAtom == tagAtom {
-				formattingElement = p.afe[j]
-				break
-			}
-		}
-		if formattingElement == nil {
-			p.inBodyEndTagOther(tagAtom)
-			return
-		}
-		feIndex := p.oe.index(formattingElement)
-		if feIndex == -1 {
-			p.afe.remove(formattingElement)
-			return
-		}
-		if !p.elementInScope(defaultScope, tagAtom) {
-			// Ignore the tag.
-			return
-		}
-
-		// Steps 9-10. Find the furthest block.
-		var furthestBlock *Node
-		for _, e := range p.oe[feIndex:] {
-			if isSpecialElement(e) {
-				furthestBlock = e
-				break
-			}
-		}
-		if furthestBlock == nil {
-			e := p.oe.pop()
-			for e != formattingElement {
-				e = p.oe.pop()
-			}
-			p.afe.remove(e)
-			return
-		}
-
-		// Steps 11-12. Find the common ancestor and bookmark node.
-		commonAncestor := p.oe[feIndex-1]
-		bookmark := p.afe.index(formattingElement)
-
-		// Step 13. The inner loop. Find the lastNode to reparent.
-		lastNode := furthestBlock
-		node := furthestBlock
-		x := p.oe.index(node)
-		// Steps 13.1-13.2
-		for j := 0; j < 3; j++ {
-			// Step 13.3.
-			x--
-			node = p.oe[x]
-			// Step 13.4 - 13.5.
-			if p.afe.index(node) == -1 {
-				p.oe.remove(node)
-				continue
-			}
-			// Step 13.6.
-			if node == formattingElement {
-				break
-			}
-			// Step 13.7.
-			clone := node.clone()
-			p.afe[p.afe.index(node)] = clone
-			p.oe[p.oe.index(node)] = clone
-			node = clone
-			// Step 13.8.
-			if lastNode == furthestBlock {
-				bookmark = p.afe.index(node) + 1
-			}
-			// Step 13.9.
-			if lastNode.Parent != nil {
-				lastNode.Parent.RemoveChild(lastNode)
-			}
-			node.AppendChild(lastNode)
-			// Step 13.10.
-			lastNode = node
-		}
-
-		// Step 14. Reparent lastNode to the common ancestor,
-		// or for misnested table nodes, to the foster parent.
-		if lastNode.Parent != nil {
-			lastNode.Parent.RemoveChild(lastNode)
-		}
-		switch commonAncestor.DataAtom {
-		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-			p.fosterParent(lastNode)
-		default:
-			commonAncestor.AppendChild(lastNode)
-		}
-
-		// Steps 15-17. Reparent nodes from the furthest block's children
-		// to a clone of the formatting element.
-		clone := formattingElement.clone()
-		reparentChildren(clone, furthestBlock)
-		furthestBlock.AppendChild(clone)
-
-		// Step 18. Fix up the list of active formatting elements.
-		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
-			// Move the bookmark with the rest of the list.
-			bookmark--
-		}
-		p.afe.remove(formattingElement)
-		p.afe.insert(bookmark, clone)
-
-		// Step 19. Fix up the stack of open elements.
-		p.oe.remove(formattingElement)
-		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
-	}
-}
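
inBodyEndTagFormatting above implements the adoption agency algorithm. A minimal sketch of its effect through the same public API (output approximate): misnested formatting tags are repaired by cloning the formatting element so the resulting tree nests properly.

package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// </b> arrives while <i> is still open; the adoption agency splits <i>.
	doc, err := html.Parse(strings.NewReader("<b>1<i>2</b>3</i>"))
	if err != nil {
		panic(err)
	}
	// Approximately: ...<body><b>1<i>2</i></b><i>3</i></body>...
	html.Render(os.Stdout, doc)
}
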
-
-// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
-// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
-// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
-func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		if p.oe[i].DataAtom == tagAtom {
-			p.oe = p.oe[:i]
-			break
-		}
-		if isSpecialElement(p.oe[i]) {
-			break
-		}
-	}
-}
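
inBodyEndTagOther pops the stack of open elements only up to a matching tag and stops at the first special element, so a stray end tag beneath a special element is simply dropped. A minimal sketch under the same assumptions:

package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// </span> is seen while the special element <div> sits above <span> on
	// the stack, so the end tag is ignored and "x" stays inside the div.
	doc, err := html.Parse(strings.NewReader("<span><div></span>x"))
	if err != nil {
		panic(err)
	}
	// Approximately: ...<body><span><div>x</div></span></body>...
	html.Render(os.Stdout, doc)
}
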
-
-// Section 12.2.6.4.8.
-func textIM(p *parser) bool {
-	switch p.tok.Type {
-	case ErrorToken:
-		p.oe.pop()
-	case TextToken:
-		d := p.tok.Data
-		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
-			// Ignore a newline at the start of a <textarea>